nixpkgs/pkgs/os-specific/linux/kernel/copperhead-4-14.patch
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0380a45ecf4b..39956a3ef645 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -490,16 +490,6 @@
nosocket -- Disable socket memory accounting.
nokmem -- Disable kernel memory accounting.
- checkreqprot [SELINUX] Set initial checkreqprot flag value.
- Format: { "0" | "1" }
- See security/selinux/Kconfig help text.
- 0 -- check protection applied by kernel (includes
- any implied execute protection).
- 1 -- check protection requested by application.
- Default value is set via a kernel config option.
- Value can be changed at runtime via
- /selinux/checkreqprot.
-
cio_ignore= [S390]
See Documentation/s390/CommonIO for details.
clk_ignore_unused
@@ -2899,6 +2889,11 @@
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
+ extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
pcbit= [HW,ISDN]
pcd. [PARIDE]
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 694968c7523c..002d86416ef8 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -91,6 +91,7 @@ show up in /proc/sys/kernel:
- sysctl_writes_strict
- tainted
- threads-max
+- tiocsti_restrict
- unknown_nmi_panic
- watchdog
- watchdog_thresh
@@ -999,6 +1000,26 @@ available RAM pages threads-max is reduced accordingly.
==============================================================
+tiocsti_restrict:
+
+This toggle indicates whether unprivileged users are prevented
+from using the TIOCSTI ioctl to inject commands into other processes
+which share a tty session.
+
+When tiocsti_restrict is set to (0) there are no restrictions (except
+the default restriction of only being able to inject commands into
+one's own tty). When tiocsti_restrict is set to (1), users must
+have CAP_SYS_ADMIN to use the TIOCSTI ioctl.
+
+When user namespaces are in use, the check for the capability
+CAP_SYS_ADMIN is done against the user namespace that originally
+opened the tty.
+
+The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the
+default value of tiocsti_restrict.
+
+==============================================================
+
unknown_nmi_panic:
The value in this file affects behavior of handling NMI. When the
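A minimal userspace sketch (not part of the patch) of what the restriction above denies: with tiocsti_restrict=1 and no CAP_SYS_ADMIN, the TIOCSTI ioctl fails with EPERM instead of queuing the byte as terminal input.

    #include <stdio.h>
    #include <sys/ioctl.h>      /* ioctl(), TIOCSTI on Linux */

    int main(void)
    {
        char ch = 'x';
        /* Attempt to fake the byte 'x' as if it had been typed on fd 0. */
        if (ioctl(0, TIOCSTI, &ch) < 0)
            perror("TIOCSTI");  /* EPERM under tiocsti_restrict=1 */
        return 0;
    }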
diff --git a/Makefile b/Makefile
index 787cf6605209..e4fda5330730 100644
--- a/Makefile
+++ b/Makefile
@@ -710,6 +710,9 @@ endif
KBUILD_CFLAGS += $(stackp-flag)
ifeq ($(cc-name),clang)
+ifdef CONFIG_LOCAL_INIT
+KBUILD_CFLAGS += -fsanitize=local-init
+endif
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
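For context, a sketch of what -fsanitize=local-init does; this assumes a clang build that carries this (non-upstream) flag. Per the CONFIG_LOCAL_INIT help text added below, it zero-fills otherwise-uninitialized locals, so the reads here become deterministic zeroes rather than stack garbage (reading an uninitialized variable remains a bug either way).

    #include <stdio.h>

    int main(void)
    {
        int x;              /* deliberately uninitialized */
        char buf[16];       /* ditto */
        /* With CONFIG_LOCAL_INIT (-fsanitize=local-init), x and buf are
         * zero-filled by the compiler instead of holding stack garbage. */
        printf("%d %d\n", x, buf[0]);
        return 0;
    }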
diff --git a/arch/Kconfig b/arch/Kconfig
index 400b9e1b2f27..4637096f7902 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -440,6 +440,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
is some slowdown of the boot process (about 0.5%) and fork and
irq processing.
+ When extra_latent_entropy is passed on the kernel command line,
+ entropy will be extracted from up to the first 4GB of RAM while the
+ runtime memory allocator is being initialized. This slows the
+ boot process down even further.
+
Note that entropy extracted this way is not cryptographically
secure!
@@ -533,7 +538,7 @@ config CC_STACKPROTECTOR
choice
prompt "Stack Protector buffer overflow detection"
depends on HAVE_CC_STACKPROTECTOR
- default CC_STACKPROTECTOR_NONE
+ default CC_STACKPROTECTOR_STRONG
help
This option turns on the "stack-protector" GCC feature. This
feature puts, at the beginning of functions, a canary value on
@@ -735,7 +740,7 @@ config ARCH_MMAP_RND_BITS
int "Number of bits to use for ASLR of mmap base address" if EXPERT
range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
- default ARCH_MMAP_RND_BITS_MIN
+ default ARCH_MMAP_RND_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_BITS
help
This value can be used to select the number of bits to use to
@@ -769,7 +774,7 @@ config ARCH_MMAP_RND_COMPAT_BITS
int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
- default ARCH_MMAP_RND_COMPAT_BITS_MIN
+ default ARCH_MMAP_RND_COMPAT_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
help
This value can be used to select the number of bits to use to
@@ -952,6 +957,7 @@ config ARCH_HAS_REFCOUNT
config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
+ default y
help
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2d5f7aca156d..aa4839a74c6a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -918,6 +918,7 @@ endif
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+ default y
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -1044,6 +1045,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
+ default y
help
Randomizes the virtual address at which the kernel image is
loaded, as a security feature that deters exploit attempts
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index cc6bd559af85..01d5442d4722 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -45,6 +45,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select ARM64_PTDUMP_CORE
+ default y
---help---
Generate a warning if any W+X mappings are found at boot.
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 34480e9af2e7..26304242250c 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 33be513ef24c..6f0c0e3ef0dd 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -114,10 +114,10 @@
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE 0x100000000UL
#ifndef __ASSEMBLY__
@@ -158,10 +158,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
/* 1GB of VA */
#ifdef CONFIG_COMPAT
#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
- 0x7ff >> (PAGE_SHIFT - 12) : \
- 0x3ffff >> (PAGE_SHIFT - 12))
+ ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
+ ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#else
-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
+#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#endif
#ifdef __AARCH64EB__
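A worked sketch of the new mask arithmetic (hypothetical values; mmap_rnd_bits comes from the ARCH_MMAP_RND_BITS Kconfig raised to its maximum above):

    #include <stdio.h>

    int main(void)
    {
        unsigned long mmap_rnd_bits = 33;  /* e.g. arm64, 48-bit VA, 4K pages */
        unsigned long page_shift = 12;
        /* Patched: the mask scales with the configured randomization bits. */
        unsigned long mask = ((1UL << mmap_rnd_bits) - 1) >> (page_shift - 12);
        /* The old fixed mask was 0x3ffff (18 bits of stack randomization). */
        printf("stack randomized over %#lx pages\n", mask + 1);
        return 0;
    }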
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9e773732520c..91359f45b5fc 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -419,9 +419,9 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
if (is_compat_task())
- return randomize_page(mm->brk, SZ_32M);
+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
else
- return randomize_page(mm->brk, SZ_1G);
+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
}
/*
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7483cd514c32..835a86c45fb0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1153,8 +1153,7 @@ config VM86
default X86_LEGACY_VM86
config X86_16BIT
- bool "Enable support for 16-bit segments" if EXPERT
- default y
+ bool "Enable support for 16-bit segments"
depends on MODIFY_LDT_SYSCALL
---help---
This option is required by programs like Wine to run 16-bit
@@ -2228,7 +2227,7 @@ config COMPAT_VDSO
choice
prompt "vsyscall table for legacy applications"
depends on X86_64
- default LEGACY_VSYSCALL_EMULATE
+ default LEGACY_VSYSCALL_NONE
help
Legacy user code that does not know how to find the vDSO expects
to be able to issue three syscalls by calling fixed addresses in
@@ -2318,8 +2317,7 @@ config CMDLINE_OVERRIDE
be set to 'N' under normal conditions.
config MODIFY_LDT_SYSCALL
- bool "Enable the LDT (local descriptor table)" if EXPERT
- default y
+ bool "Enable the LDT (local descriptor table)"
---help---
Linux can allow user programs to install a per-process x86
Local Descriptor Table (LDT) using the modify_ldt(2) system
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 6293a8768a91..add82e0f1df3 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -101,6 +101,7 @@ config EFI_PGT_DUMP
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select X86_PTDUMP_CORE
+ default y
---help---
Generate a warning if any W+X mappings are found at boot.
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index e32fc1f274d8..d08acc76502a 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 1911310959f8..bba8dbbc07a8 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -203,55 +203,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
#ifdef CONFIG_X86_64
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset. This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top. This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
- unsigned long addr, end;
- unsigned offset;
-
- /*
- * Round up the start address. It can start out unaligned as a result
- * of stack start randomization.
- */
- start = PAGE_ALIGN(start);
-
- /* Round the lowest possible end address up to a PMD boundary. */
- end = (start + len + PMD_SIZE - 1) & PMD_MASK;
- if (end >= TASK_SIZE_MAX)
- end = TASK_SIZE_MAX;
- end -= len;
-
- if (end > start) {
- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
- addr = start + (offset << PAGE_SHIFT);
- } else {
- addr = start;
- }
-
- /*
- * Forcibly align the final address in case we have a hardware
- * issue that requires alignment for performance reasons.
- */
- addr = align_vdso_addr(addr);
-
- return addr;
-}
-
static int map_vdso_randomized(const struct vdso_image *image)
{
- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
-
- return map_vdso(image, addr);
+ return map_vdso(image, 0);
}
#endif
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 3a091cea36c5..0931c05a3348 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -249,11 +249,11 @@ extern int force_personality32;
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
- (DEFAULT_MAP_WINDOW / 3 * 2))
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
@@ -312,8 +312,8 @@ extern unsigned long get_mmap_base(int is_legacy);
#ifdef CONFIG_X86_32
-#define __STACK_RND_MASK(is32bit) (0x7ff)
-#define STACK_RND_MASK (0x7ff)
+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
+#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)
#define ARCH_DLINFO ARCH_DLINFO_IA32
@@ -322,7 +322,11 @@ extern unsigned long get_mmap_base(int is_legacy);
#else /* CONFIG_X86_32 */
/* 1GB for 64bit, 8MB for 32bit */
-#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
+#ifdef CONFIG_COMPAT
+#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
+#else
+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
+#endif
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
#define ARCH_DLINFO \
@@ -380,5 +384,4 @@ struct va_alignment {
} ____cacheline_aligned;
extern struct va_alignment va_align;
-extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 704f31315dde..bb82b6344a7b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -253,6 +253,7 @@ static inline void cr4_set_bits(unsigned long mask)
unsigned long cr4;
cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
if ((cr4 | mask) != cr4) {
cr4 |= mask;
this_cpu_write(cpu_tlbstate.cr4, cr4);
@@ -266,6 +267,7 @@ static inline void cr4_clear_bits(unsigned long mask)
unsigned long cr4;
cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
if ((cr4 & ~mask) != cr4) {
cr4 &= ~mask;
this_cpu_write(cpu_tlbstate.cr4, cr4);
@@ -278,6 +280,7 @@ static inline void cr4_toggle_bits(unsigned long mask)
unsigned long cr4;
cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
cr4 ^= mask;
this_cpu_write(cpu_tlbstate.cr4, cr4);
__write_cr4(cr4);
@@ -386,6 +389,7 @@ static inline void __native_flush_tlb_global(void)
raw_local_irq_save(flags);
cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
/* toggle PGE */
native_write_cr4(cr4 ^ X86_CR4_PGE);
/* write old PGE again and flush TLBs */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 48e98964ecad..a94dc690612f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1637,7 +1637,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
- x86_configure_nx();
x2apic_setup();
/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 988a98f34c66..dc36d2d9078a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,6 +40,8 @@
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
+#include <asm/elf.h>
+#include <linux/sizes.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -719,7 +721,10 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- return randomize_page(mm->brk, 0x02000000);
+ if (mmap_is_ia32())
+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
+ else
+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
}
/*
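A worked sketch of the replacement brk randomization (stand-in values; SZ_1G and get_random_long() are the kernel symbols used above). The gap above the unrandomized break is now byte-granular and anywhere in [PAGE_SIZE, SZ_1G + PAGE_SIZE), instead of randomize_page()'s page-aligned 32MB window.

    #include <stdio.h>

    #define SZ_1G     (1UL << 30)
    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long brk = 0x555555560000UL;  /* hypothetical mm->brk */
        unsigned long rnd = 1234567890UL;      /* stand-in for get_random_long() */
        unsigned long new_brk = brk + rnd % SZ_1G + PAGE_SIZE;
        printf("brk raised by %#lx bytes\n", new_brk - brk);
        return 0;
    }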
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a63fe77b3217..e1085e76043e 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -54,13 +54,6 @@ static unsigned long get_align_bits(void)
return va_align.bits & get_align_mask();
}
-unsigned long align_vdso_addr(unsigned long addr)
-{
- unsigned long align_mask = get_align_mask();
- addr = (addr + align_mask) & ~align_mask;
- return addr | get_align_bits();
-}
-
static int __init control_va_addr_alignment(char *str)
{
/* guard against enabling this on other CPU families */
@@ -122,10 +115,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
}
*begin = get_mmap_base(1);
- if (in_compat_syscall())
- *end = task_size_32bit();
- else
- *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
+ *end = get_mmap_base(0);
}
unsigned long
@@ -206,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = PAGE_SIZE;
+ info.low_limit = get_mmap_base(1);
info.high_limit = get_mmap_base(0);
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 3141e67ec24c..e93173193f60 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -558,7 +558,7 @@ static void __init pagetable_init(void)
permanent_kmaps_init(pgd_base);
}
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
+pteval_t __supported_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* user-defined highmem size */
@@ -865,7 +865,7 @@ int arch_remove_memory(u64 start, u64 size)
#endif
#endif
-int kernel_set_to_readonly __read_mostly;
+int kernel_set_to_readonly __ro_after_init;
void set_kernel_text_rw(void)
{
@@ -917,12 +917,11 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;
+ kernel_set_to_readonly = 1;
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);
- kernel_set_to_readonly = 1;
-
#ifdef CONFIG_CPA_DEBUG
printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
start, start+size);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 642357aff216..8bbf93ce3cd2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -65,7 +65,7 @@
* around without checking the pgd every time.
*/
-pteval_t __supported_pte_mask __read_mostly = ~0;
+pteval_t __supported_pte_mask __ro_after_init = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
int force_personality32;
@@ -1185,7 +1185,7 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
-int kernel_set_to_readonly;
+int kernel_set_to_readonly __ro_after_init;
void set_kernel_text_rw(void)
{
@@ -1234,9 +1234,8 @@ void mark_rodata_ro(void)
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
-
kernel_set_to_readonly = 1;
+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
/*
* The rodata/data/bss/brk section (but not the kernel text!)
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 01e2b353a2b9..9aeddca4a29f 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -20,7 +20,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
* Softirq action handler - move entries to local list and loop over them
* while passing them to the queue registered handler.
*/
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(void)
{
struct list_head *cpu_list, local_list;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 473f150d6b22..65a65f9824ed 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5141,7 +5141,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
ap = qc->ap;
qc->flags = 0;
@@ -5158,7 +5158,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
ap = qc->ap;
link = qc->dev->link;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c28dca0c613d..d4813f0d25ca 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -9,7 +9,6 @@ source "drivers/tty/Kconfig"
config DEVMEM
bool "/dev/mem virtual device support"
- default y
help
Say Y here if you want to support the /dev/mem device.
The /dev/mem device is used to access areas of physical
@@ -568,7 +567,6 @@ config TELCLOCK
config DEVPORT
bool "/dev/port character device"
depends on ISA || PCI
- default y
help
Say Y here if you want to support the /dev/port device. The /dev/port
device is similar to /dev/mem, but for I/O ports.
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index e105532bfba8..e07d52bb9b62 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -1462,7 +1462,7 @@ static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
return cx24116_read_status(fe, status);
}
-static int cx24116_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index d37cb7762bd6..97e0feff0ede 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1555,7 +1555,7 @@ static int cx24117_tune(struct dvb_frontend *fe, bool re_tune,
return cx24117_read_status(fe, status);
}
-static int cx24117_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 7f11dcc94d85..01da670760ba 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -1491,7 +1491,7 @@ static int cx24120_tune(struct dvb_frontend *fe, bool re_tune,
return cx24120_read_status(fe, status);
}
-static int cx24120_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 1d59d1d3bd82..41cd0e9ea199 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1005,7 +1005,7 @@ static int cx24123_tune(struct dvb_frontend *fe,
return retval;
}
-static int cx24123_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index f6ebbb47b9b2..3e0d8cbd76da 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -403,7 +403,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
return DVBFE_ALGO_SEARCH_ERROR;
}
-static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_CUSTOM;
}
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index e8ac8c3e2ec0..e0f4ba8302d1 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2055,7 +2055,7 @@ static void mb86a20s_release(struct dvb_frontend *fe)
kfree(state);
}
-static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index 274544a3ae0e..9ef9b9bc1bd2 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -464,7 +464,7 @@ static int s921_tune(struct dvb_frontend *fe,
return rc;
}
-static int s921_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 7166d2279465..fa682f9fdc4b 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1657,7 +1657,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
return 0;
}
-static int dst_get_tuning_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe)
{
return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
}
diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
index f75f69556be7..d913a6050e8c 100644
--- a/drivers/media/pci/pt1/va1j5jf8007s.c
+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
@@ -98,7 +98,7 @@ static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr)
return 0;
}
-static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
index 63fda79a75c0..4115c3ccd4a8 100644
--- a/drivers/media/pci/pt1/va1j5jf8007t.c
+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
@@ -88,7 +88,7 @@ static int va1j5jf8007t_read_snr(struct dvb_frontend *fe, u16 *snr)
return 0;
}
-static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 981b3ef71e47..9883da1da383 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -78,7 +78,7 @@ static irqreturn_t jp_handle_irq_event(unsigned int irq,
return 0;
}
-static void jp_tasklet_action(struct softirq_action *a)
+static void jp_tasklet_action(void)
{
lkdtm_handler();
jprobe_return();
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b811442c5ce6..4f62a63cbcb1 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -122,7 +122,6 @@ config UNIX98_PTYS
config LEGACY_PTYS
bool "Legacy (BSD) PTY support"
- default y
---help---
A pseudo terminal (PTY) is a software device consisting of two
halves: a master and a slave. The slave device behaves identical to
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 562d31073f9a..2184b9b5485f 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -171,6 +171,7 @@ static void free_tty_struct(struct tty_struct *tty)
put_device(tty->dev);
kfree(tty->write_buf);
tty->magic = 0xDEADDEAD;
+ put_user_ns(tty->owner_user_ns);
kfree(tty);
}
@@ -2154,11 +2155,19 @@ static int tty_fasync(int fd, struct file *filp, int on)
* FIXME: may race normal receive processing
*/
+int tiocsti_restrict = IS_ENABLED(CONFIG_SECURITY_TIOCSTI_RESTRICT);
+
static int tiocsti(struct tty_struct *tty, char __user *p)
{
char ch, mbz = 0;
struct tty_ldisc *ld;
+ if (tiocsti_restrict &&
+ !ns_capable(tty->owner_user_ns, CAP_SYS_ADMIN)) {
+ dev_warn_ratelimited(tty->dev,
+ "Denied TIOCSTI ioctl for non-privileged process\n");
+ return -EPERM;
+ }
if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ch, p))
@@ -2841,6 +2850,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
tty->index = idx;
tty_line_name(driver, idx, tty->name);
tty->dev = tty_get_device(tty);
+ tty->owner_user_ns = get_user_ns(current_user_ns());
return tty;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 442be7f312f6..788557d5c454 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -38,6 +38,8 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+extern int deny_new_usb;
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -4806,6 +4808,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
+
+ if (deny_new_usb) {
+ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
+ goto done;
+ }
+
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
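The deny_new_usb toggle itself is defined in the kernel/sysctl.c hunk further down in this patch. A sketch of flipping it at runtime, assuming the sysctl is exposed as kernel.deny_new_usb (the registration is not shown in this excerpt):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/deny_new_usb", "w");
        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("1\n", f);  /* hub_port_connect() now refuses new devices */
        return fclose(f) == 0 ? 0 : 1;
    }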
diff --git a/fs/exec.c b/fs/exec.c
index 0da4d748b4e6..69fcee853363 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
+#include <linux/random.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -321,6 +322,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
arch_bprm_mm_init(mm, vma);
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
+ if (randomize_va_space)
+ bprm->p ^= get_random_int() & ~PAGE_MASK;
return 0;
err:
up_write(&mm->mmap_sem);
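A worked sketch of the added jitter: ~PAGE_MASK keeps only the sub-page bits (0xfff with 4K pages), so the initial stack pointer of the new program moves by 0..4095 bytes within its page.

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_mask = ~0xfffUL;       /* PAGE_MASK, 4K pages */
        unsigned long p = 0x7ffffffff000UL - 8;   /* vma->vm_end - sizeof(void *) */
        unsigned int rnd = 0x1a3;                 /* stand-in for get_random_int() */
        p ^= rnd & ~page_mask;                    /* jitter within one page */
        printf("bprm->p = %#lx\n", p);
        return 0;
    }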
diff --git a/fs/namei.c b/fs/namei.c
index 0b46b858cd42..3ae8e72341da 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -902,8 +902,8 @@ static inline void put_link(struct nameidata *nd)
path_put(&last->link);
}
-int sysctl_protected_symlinks __read_mostly = 0;
-int sysctl_protected_hardlinks __read_mostly = 0;
+int sysctl_protected_symlinks __read_mostly = 1;
+int sysctl_protected_hardlinks __read_mostly = 1;
/**
* may_follow_link - Check symlink following for unsafe situations
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 5f93cfacb3d1..cea0d7d3b23e 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -195,4 +195,3 @@ config NFS_DEBUG
bool
depends on NFS_FS && SUNRPC_DEBUG
select CRC32
- default y
diff --git a/fs/pipe.c b/fs/pipe.c
index 8ef7d7bef775..b82f305ec13d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -38,7 +38,7 @@ unsigned int pipe_max_size = 1048576;
/*
* Minimum pipe size, as required by POSIX
*/
-unsigned int pipe_min_size = PAGE_SIZE;
+unsigned int pipe_min_size __read_only = PAGE_SIZE;
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
* matches default values.
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 1ade1206bb89..60b0f76dec47 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -39,7 +39,6 @@ config PROC_KCORE
config PROC_VMCORE
bool "/proc/vmcore support"
depends on PROC_FS && CRASH_DUMP
- default y
help
Exports the dump image of crashed kernel in ELF format.
diff --git a/fs/stat.c b/fs/stat.c
index 873785dae022..d3c2ada8b9c7 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -40,8 +40,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
stat->gid = inode->i_gid;
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
+ if (is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
+ stat->atime = inode->i_ctime;
+ stat->mtime = inode->i_ctime;
+ } else {
+ stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+ }
stat->ctime = inode->i_ctime;
stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
@@ -75,9 +80,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
stat->result_mask |= STATX_BASIC_STATS;
request_mask &= STATX_ALL;
query_flags &= KSTAT_QUERY_FLAGS;
- if (inode->i_op->getattr)
- return inode->i_op->getattr(path, stat, request_mask,
- query_flags);
+ if (inode->i_op->getattr) {
+ int retval = inode->i_op->getattr(path, stat, request_mask, query_flags);
+ if (!retval && is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
+ stat->atime = stat->ctime;
+ stat->mtime = stat->ctime;
+ }
+ return retval;
+ }
generic_fillattr(inode, stat);
return 0;
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..e7157c18c62c 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,6 +31,8 @@
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
+#define __read_only __ro_after_init
+
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
diff --git a/include/linux/capability.h b/include/linux/capability.h
index f640dcbc880c..2b4f5d651f19 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -207,6 +207,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
+extern bool capable_noaudit(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
#else
@@ -232,6 +233,10 @@ static inline bool capable(int cap)
{
return true;
}
+static inline bool capable_noaudit(int cap)
+{
+ return true;
+}
static inline bool ns_capable(struct user_namespace *ns, int cap)
{
return true;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc613f20e5a6..7606596d6c2e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3392,4 +3392,15 @@ static inline bool dir_relax_shared(struct inode *inode)
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
+extern int device_sidechannel_restrict;
+
+static inline bool is_sidechannel_device(const struct inode *inode)
+{
+ umode_t mode;
+ if (!device_sidechannel_restrict)
+ return false;
+ mode = inode->i_mode;
+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
+}
+
#endif /* _LINUX_FS_H */
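A sketch of the observable effect, assuming device_sidechannel_restrict is enabled: for a world-accessible device node such as /dev/ptmx, an unprivileged stat() reports ctime in place of atime/mtime (per the fs/stat.c hunk above), hiding read/write timing from other users.

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct stat st;
        if (stat("/dev/ptmx", &st) != 0) {
            perror("stat");
            return 1;
        }
        /* Unprivileged callers see atime == mtime == ctime. */
        printf("atime==ctime: %d, mtime==ctime: %d\n",
               st.st_atime == st.st_ctime, st.st_mtime == st.st_ctime);
        return 0;
    }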
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bdaf22582f6e..326ff15d4637 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -181,6 +181,9 @@ static inline void fsnotify_access(struct file *file)
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;
+ if (is_sidechannel_device(inode))
+ return;
+
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
@@ -199,6 +202,9 @@ static inline void fsnotify_modify(struct file *file)
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;
+ if (is_sidechannel_device(inode))
+ return;
+
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b041f94678de..a5e0175c79e0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -518,9 +518,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
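What the alloc_size attribute buys (here and in the mm.h, percpu.h, slab.h and vmalloc.h hunks below): the compiler learns the size of the returned allocation, so __builtin_object_size() and FORTIFY checks can reason about dynamically sized buffers. A userspace sketch with a hypothetical my_alloc():

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical allocator; alloc_size(1) tells the compiler the
     * returned buffer is n bytes. */
    void *my_alloc(size_t n) __attribute__((malloc, alloc_size(1)));
    void *my_alloc(size_t n) { return malloc(n); }

    int main(void)
    {
        char *p = my_alloc(8);
        /* With -O2 the compiler now knows the allocation size, so
         * __builtin_object_size() (and thus FORTIFY_SOURCE checks on
         * memcpy/strcpy etc.) sees 8 instead of -1 (unknown). */
        printf("%zu\n", __builtin_object_size(p, 0));
        free(p);
        return 0;
    }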
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 776f90f3a1cd..3f5c47000059 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -191,6 +191,13 @@ static inline void clear_highpage(struct page *page)
kunmap_atomic(kaddr);
}
+static inline void verify_zero_highpage(struct page *page)
+{
+ void *kaddr = kmap_atomic(page);
+ BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
+ kunmap_atomic(kaddr);
+}
+
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69c238210325..ee487ea4f48f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -485,7 +485,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
- void (*action)(struct softirq_action *);
+ void (*action)(void);
};
asmlinkage void do_softirq(void);
@@ -500,7 +500,7 @@ static inline void do_softirq_own_stack(void)
}
#endif
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void __init open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index df32d2508290..c992d130b94d 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -46,7 +46,7 @@ struct kobj_ns_type_operations {
void (*drop_ns)(void *);
};
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f23215854c80..98df98c44cc0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -525,7 +525,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((alloc_size(1)));
static inline void *kvmalloc(size_t size, gfp_t flags)
{
return kvmalloc_node(size, flags, NUMA_NO_NODE);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 296bbe49d5d1..b26652c9a98d 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -129,7 +129,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
@@ -137,8 +137,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((alloc_size(1)));
+extern void __percpu *__alloc_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8e22f24ded6a..b7fecdfa6de5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1165,6 +1165,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
+static inline bool perf_paranoid_any(void)
+{
+ return sysctl_perf_event_paranoid > 2;
+}
+
static inline bool perf_paranoid_tracepoint_raw(void)
{
return sysctl_perf_event_paranoid > -1;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ae5ed6492d54..fd0786124504 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -146,8 +146,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check __krealloc(const void *, size_t, gfp_t);
-void * __must_check krealloc(const void *, size_t, gfp_t);
+void * __must_check __krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
+void * __must_check krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
@@ -324,7 +324,7 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);
@@ -348,7 +348,7 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -473,7 +473,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* for general use, and so are not documented here. For a full list of
* potential flags, always refer to linux/gfp.h.
*/
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
if (size > KMALLOC_MAX_CACHE_SIZE)
@@ -513,7 +513,7 @@ static __always_inline int kmalloc_size(int n)
return 0;
}
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __attribute__((alloc_size(1))) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 39fa09bcde23..0b7a48cd883b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -120,6 +120,11 @@ struct kmem_cache {
unsigned long random;
#endif
+#ifdef CONFIG_SLAB_CANARY
+ unsigned long random_active;
+ unsigned long random_inactive;
+#endif
+
#ifdef CONFIG_NUMA
/*
* Defragmentation by allocating from a remote node.
diff --git a/include/linux/string.h b/include/linux/string.h
index cfd83eb2f926..b9ecb42c762d 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -234,10 +234,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob
void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
+#ifdef CONFIG_FORTIFY_SOURCE_STRICT_STRING
+#define __string_size(p) __builtin_object_size(p, 1)
+#else
+#define __string_size(p) __builtin_object_size(p, 0)
+#endif
+
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);
if (__builtin_constant_p(size) && p_size < size)
__write_overflow();
if (p_size < size)
@@ -247,7 +253,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
__FORTIFY_INLINE char *strcat(char *p, const char *q)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);
if (p_size == (size_t)-1)
return __builtin_strcat(p, q);
if (strlcat(p, q, p_size) >= p_size)
@@ -258,7 +264,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q)
__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
{
__kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);
/* Work around gcc excess stack consumption issue */
if (p_size == (size_t)-1 ||
@@ -273,7 +279,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);
__kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
if (p_size <= ret && maxlen != ret)
fortify_panic(__func__);
@@ -285,8 +291,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
{
size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __string_size(p);
+ size_t q_size = __string_size(q);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __real_strlcpy(p, q, size);
ret = strlen(q);
@@ -306,8 +312,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
{
size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __string_size(p);
+ size_t q_size = __string_size(q);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __builtin_strncat(p, q, count);
p_len = strlen(p);
@@ -420,8 +426,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
/* defined after fortified strlen and memcpy to reuse them */
__FORTIFY_INLINE char *strcpy(char *p, const char *q)
{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __string_size(p);
+ size_t q_size = __string_size(q);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __builtin_strcpy(p, q);
memcpy(p, q, strlen(q) + 1);
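The strict mode switches FORTIFY from __builtin_object_size(p, 0) (size of the whole enclosing object) to mode 1 (size of the addressed member), which catches intra-struct string overflows. A compile-time sketch:

    #include <stdio.h>

    struct s {
        char name[8];
        char rest[8];
    };

    int main(void)
    {
        struct s v;
        /* Mode 0 sees the 16-byte struct; mode 1 sees only 'name', so a
         * strcpy() overflowing 'name' into 'rest' is caught under
         * CONFIG_FORTIFY_SOURCE_STRICT_STRING. (Values need -O1 or higher.) */
        printf("mode0=%zu mode1=%zu\n",
               __builtin_object_size(v.name, 0),
               __builtin_object_size(v.name, 1));
        return 0;
    }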
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1dd587ba6d88..9a9a04fb641d 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -13,6 +13,7 @@
#include <uapi/linux/tty.h>
#include <linux/rwsem.h>
#include <linux/llist.h>
+#include <linux/user_namespace.h>
/*
@@ -335,6 +336,7 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
+ struct user_namespace *owner_user_ns;
} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
@@ -344,6 +346,8 @@ struct tty_file_private {
struct list_head list;
};
+extern int tiocsti_restrict;
+
/* tty magic number */
#define TTY_MAGIC 0x5401
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1e5d8c392f15..66d0e49c9987 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -68,19 +68,19 @@ static inline void vmalloc_init(void)
}
#endif
-extern void *vmalloc(unsigned long size);
-extern void *vzalloc(unsigned long size);
-extern void *vmalloc_user(unsigned long size);
-extern void *vmalloc_node(unsigned long size, int node);
-extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
-extern void *vmalloc_32(unsigned long size);
-extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *vmalloc(unsigned long size) __attribute__((alloc_size(1)));
+extern void *vzalloc(unsigned long size) __attribute__((alloc_size(1)));
+extern void *vmalloc_user(unsigned long size) __attribute__((alloc_size(1)));
+extern void *vmalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
+extern void *vzalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
+extern void *vmalloc_exec(unsigned long size) __attribute__((alloc_size(1)));
+extern void *vmalloc_32(unsigned long size) __attribute__((alloc_size(1)));
+extern void *vmalloc_32_user(unsigned long size) __attribute__((alloc_size(1)));
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __attribute__((alloc_size(1)));
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller);
+ const void *caller) __attribute__((alloc_size(1)));
#ifndef CONFIG_MMU
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
diff --git a/init/Kconfig b/init/Kconfig
index 46075327c165..0c78750bc76d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -309,6 +309,7 @@ config USELIB
config AUDIT
bool "Auditing support"
depends on NET
+ default y
help
Enable auditing infrastructure that can be used with another
kernel subsystem, such as SELinux (which requires this for
@@ -1052,6 +1053,12 @@ config CC_OPTIMIZE_FOR_SIZE
endchoice
+config LOCAL_INIT
+ bool "Zero uninitialized locals"
+ help
+ Zero-fill uninitialized local variables, other than variable-length
+ arrays. Requires compiler support.
+
config SYSCTL
bool
@@ -1361,8 +1368,7 @@ config SHMEM
which may be appropriate on small systems without swap.
config AIO
- bool "Enable AIO support" if EXPERT
- default y
+ bool "Enable AIO support"
help
This option enables POSIX asynchronous I/O which may be used
by some high performance threaded applications. Disabling
@@ -1491,7 +1497,7 @@ config VM_EVENT_COUNTERS
config SLUB_DEBUG
default y
- bool "Enable SLUB debugging support" if EXPERT
+ bool "Enable SLUB debugging support"
depends on SLUB && SYSFS
help
SLUB has extensive debug support features. Disabling these can
@@ -1515,7 +1521,6 @@ config SLUB_MEMCG_SYSFS_ON
config COMPAT_BRK
bool "Disable heap randomization"
- default y
help
Randomizing heap placement makes heap exploits harder, but it
also breaks ancient binaries (including anything libc5 based).
@@ -1562,7 +1567,6 @@ endchoice
config SLAB_MERGE_DEFAULT
bool "Allow slab caches to be merged"
- default y
help
For reduced kernel memory fragmentation, slab caches can be
merged when they share the same size and other characteristics.
@@ -1575,9 +1579,9 @@ config SLAB_MERGE_DEFAULT
command line.
config SLAB_FREELIST_RANDOM
- default n
depends on SLAB || SLUB
bool "SLAB freelist randomization"
+ default y
help
Randomizes the freelist order used on creating new pages. This
security feature reduces the predictability of the kernel slab
@@ -1586,12 +1590,56 @@ config SLAB_FREELIST_RANDOM
config SLAB_FREELIST_HARDENED
bool "Harden slab freelist metadata"
depends on SLUB
+ default y
help
Many kernel heap attacks try to target slab cache metadata and
other infrastructure. This option makes minor performance
sacrifices to harden the kernel slab allocator against common
freelist exploit methods.
+config SLAB_HARDENED
+ default y
+ depends on SLUB
+ bool "Hardened SLAB infrastructure"
+ help
+ Make minor performance sacrifices to harden the kernel slab
+ allocator.
+
+config SLAB_CANARY
+ depends on SLUB
+ depends on !SLAB_MERGE_DEFAULT
+ bool "SLAB canaries"
+ default y
+ help
+ Place canaries at the end of kernel slab allocations, sacrificing
+ some performance and memory usage for security.
+
+ Canaries can detect some forms of heap corruption when allocations
+ are freed and as part of the HARDENED_USERCOPY feature. It provides
+ basic use-after-free detection for HARDENED_USERCOPY.
+
+ Canaries absorb small overflows (rendering them harmless), mitigate
+ non-NUL terminated C string overflows on 64-bit via a guaranteed zero
+ byte and provide basic double-free detection.
+
+config SLAB_SANITIZE
+ bool "Sanitize SLAB allocations"
+ depends on SLUB
+ default y
+ help
+ Zero fill slab allocations on free, reducing the lifetime of
+ sensitive data and helping to mitigate use-after-free bugs.
+
+ For slabs with debug poisoning enabled, this has no impact.
+
+config SLAB_SANITIZE_VERIFY
+ depends on SLAB_SANITIZE && PAGE_SANITIZE
+ default y
+ bool "Verify sanitized SLAB allocations"
+ help
+ Verify that newly allocated slab allocations are zeroed to detect
+ write-after-free bugs.
+
config SLUB_CPU_PARTIAL
default y
depends on SLUB && SMP
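A userspace model of the SLAB_CANARY idea (not the patch's exact layout): a secret value, mixed with the object address, sits at the end of each allocation and is checked on free, so a small linear overflow is detected instead of silently corrupting the neighbouring object or freelist metadata.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct obj {
        char data[32];
        unsigned long canary;  /* placed after the usable object */
    };

    static const unsigned long secret = 0xdeadbeefcafeUL;  /* per-cache random */

    static struct obj *obj_alloc(void)
    {
        struct obj *o = malloc(sizeof(*o));
        o->canary = secret ^ (unsigned long)o;  /* tied to the address */
        return o;
    }

    static void obj_free(struct obj *o)
    {
        if (o->canary != (secret ^ (unsigned long)o)) {
            fprintf(stderr, "slab canary corrupted\n");
            abort();
        }
        free(o);
    }

    int main(void)
    {
        struct obj *o = obj_alloc();
        memset(o->data, 'A', 33);  /* deliberate one-byte overflow hits the canary */
        obj_free(o);               /* detected here, before any metadata abuse */
        return 0;
    }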
diff --git a/kernel/audit.c b/kernel/audit.c
index 5b34d3114af4..e57930192ce1 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1573,6 +1573,9 @@ static int __init audit_enable(char *str)
audit_default = !!simple_strtol(str, NULL, 0);
if (!audit_default)
audit_initialized = AUDIT_DISABLED;
+ else
+ audit_initialized = AUDIT_UNINITIALIZED;
+
audit_enabled = audit_default;
audit_ever_enabled = !!audit_enabled;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d203a5d6b726..2a6c3e2c57a6 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -539,7 +539,7 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
-int bpf_jit_harden __read_mostly;
+int bpf_jit_harden __read_mostly = 2;
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4e933219fec6..0f37db32a2b1 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
-int sysctl_unprivileged_bpf_disabled __read_mostly;
+int sysctl_unprivileged_bpf_disabled __read_mostly = 1;
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
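With the default flipped to 1, any bpf(2) call from a task without CAP_SYS_ADMIN is refused up front. A minimal sketch:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    int main(void)
    {
        union bpf_attr attr;
        memset(&attr, 0, sizeof(attr));
        /* Fails with EPERM for unprivileged callers when
         * kernel.unprivileged_bpf_disabled = 1. */
        if (syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)) < 0)
            perror("bpf");
        return 0;
    }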
diff --git a/kernel/capability.c b/kernel/capability.c
index 1e1c0236f55b..452062fe45ce 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -431,6 +431,12 @@ bool capable(int cap)
return ns_capable(&init_user_ns, cap);
}
EXPORT_SYMBOL(capable);
+
+bool capable_noaudit(int cap)
+{
+ return ns_capable_noaudit(&init_user_ns, cap);
+}
+EXPORT_SYMBOL(capable_noaudit);
#endif /* CONFIG_MULTIUSER */
/**
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cb8274d7824f..c1b3d232b0a4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask;
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
+ * 3 - disallow all unpriv perf event use
*/
+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+int sysctl_perf_event_paranoid __read_mostly = 3;
+#else
int sysctl_perf_event_paranoid __read_mostly = 2;
+#endif
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -9941,6 +9946,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
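Paranoid level 3 blocks all unprivileged perf use before attribute parsing. A minimal sketch of a caller hitting the new perf_paranoid_any() check:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        /* EACCES when perf_event_paranoid > 2 and no CAP_SYS_ADMIN. */
        if (syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0)
            perror("perf_event_open");
        return 0;
    }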
diff --git a/kernel/fork.c b/kernel/fork.c
index 98c91bd341b4..dbb9540ee61c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -102,6 +102,11 @@
#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
+#ifdef CONFIG_USER_NS
+extern int unprivileged_userns_clone;
+#else
+#define unprivileged_userns_clone 0
+#endif
/*
* Minimum number of threads to boot the kernel
@@ -1554,6 +1559,10 @@ static __latent_entropy struct task_struct *copy_process(
if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
return ERR_PTR(-EINVAL);
+ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
@@ -2347,6 +2356,12 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
if (unshare_flags & CLONE_NEWNS)
unshare_flags |= CLONE_FS;
+ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
+ err = -EPERM;
+ if (!capable(CAP_SYS_ADMIN))
+ goto bad_unshare_out;
+ }
+
err = check_unshare_flags(unshare_flags);
if (err)
goto bad_unshare_out;
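With unprivileged_userns_clone = 0 (the variable is defined elsewhere in this patch; only the externs appear in this excerpt), unprivileged user-namespace creation fails. A minimal sketch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sched.h>

    int main(void)
    {
        /* EPERM when kernel.unprivileged_userns_clone = 0 and the caller
         * lacks CAP_SYS_ADMIN. */
        if (unshare(CLONE_NEWUSER) < 0)
            perror("unshare(CLONE_NEWUSER)");
        return 0;
    }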
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0972a8e09d08..00dde7aad47a 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1136,7 +1136,7 @@ void free_basic_memory_bitmaps(void)
void clear_free_pages(void)
{
-#ifdef CONFIG_PAGE_POISONING_ZERO
+#if defined(CONFIG_PAGE_POISONING_ZERO) || defined(CONFIG_PAGE_SANITIZE)
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
@@ -1153,7 +1153,7 @@ void clear_free_pages(void)
}
memory_bm_position_reset(bm);
pr_info("PM: free pages cleared after restore\n");
-#endif /* PAGE_POISONING_ZERO */
+#endif /* PAGE_POISONING_ZERO || PAGE_SANITIZE */
}
/**
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index a64eee0db39e..4d7de378fe4c 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -164,7 +164,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
}
}
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(void)
{
__rcu_process_callbacks(&rcu_sched_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3e3650e94ae6..7ecd7a5d04b3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2918,7 +2918,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(void)
{
struct rcu_state *rsp;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5c09ddf8c832..f5db6ece105a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8986,7 +8986,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
-static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(void)
{
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e89c3b0cff6d..0d3ebd520931 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE);
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
@@ -281,7 +281,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
- h->action(h);
+ h->action();
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
@@ -444,7 +444,7 @@ void __raise_softirq_irqoff(unsigned int nr)
or_softirq_pending(1UL << nr);
}
-void open_softirq(int nr, void (*action)(struct softirq_action *))
+void __init open_softirq(int nr, void (*action)(void))
{
softirq_vec[nr].action = action;
}
@@ -486,7 +486,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
-static __latent_entropy void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(void)
{
struct tasklet_struct *list;
@@ -522,7 +522,7 @@ static __latent_entropy void tasklet_action(struct softirq_action *a)
}
}
-static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(void)
{
struct tasklet_struct *list;
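
Every softirq handler changes signature the same way, and because softirq_vec is now __ro_after_init, registration is confined to boot-time __init code. A sketch of the resulting pattern (mirroring what softirq_init() already does in this file):

static __latent_entropy void tasklet_action(void);

void __init softirq_init(void)
{
	/* Must run before __ro_after_init memory is sealed read-only. */
	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
}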
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 069550540a39..822783a174aa 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -66,6 +66,7 @@
#include <linux/kexec.h>
#include <linux/bpf.h>
#include <linux/mount.h>
+#include <linux/tty.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
@@ -98,12 +99,19 @@
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
+#if IS_ENABLED(CONFIG_USB)
+int deny_new_usb __read_mostly = 0;
+EXPORT_SYMBOL(deny_new_usb);
+#endif
extern int suid_dumpable;
#ifdef CONFIG_COREDUMP
extern int core_uses_pid;
extern char core_pattern[];
extern unsigned int core_pipe_limit;
#endif
+#ifdef CONFIG_USER_NS
+extern int unprivileged_userns_clone;
+#endif
extern int pid_max;
extern int pid_max_min, pid_max_max;
extern int percpu_pagelist_fraction;
@@ -115,40 +123,43 @@ extern int sysctl_nr_trim_pages;
/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
-static int sixty = 60;
+static int sixty __read_only = 60;
#endif
-static int __maybe_unused neg_one = -1;
+static int __maybe_unused neg_one __read_only = -1;
static int zero;
-static int __maybe_unused one = 1;
-static int __maybe_unused two = 2;
-static int __maybe_unused four = 4;
-static unsigned long one_ul = 1;
-static int one_hundred = 100;
-static int one_thousand = 1000;
+static int __maybe_unused one __read_only = 1;
+static int __maybe_unused two __read_only = 2;
+static int __maybe_unused four __read_only = 4;
+static unsigned long one_ul __read_only = 1;
+static int one_hundred __read_only = 100;
+static int one_thousand __read_only = 1000;
#ifdef CONFIG_PRINTK
-static int ten_thousand = 10000;
+static int ten_thousand __read_only = 10000;
#endif
#ifdef CONFIG_PERF_EVENTS
-static int six_hundred_forty_kb = 640 * 1024;
+static int six_hundred_forty_kb __read_only = 640 * 1024;
#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
-static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+static unsigned long dirty_bytes_min __read_only = 2 * PAGE_SIZE;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
-static int maxolduid = 65535;
-static int minolduid;
+static int maxolduid __read_only = 65535;
+static int minolduid __read_only;
-static int ngroups_max = NGROUPS_MAX;
+static int ngroups_max __read_only = NGROUPS_MAX;
static const int cap_last_cap = CAP_LAST_CAP;
/*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
#ifdef CONFIG_DETECT_HUNG_TASK
-static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ);
#endif
+int device_sidechannel_restrict __read_mostly = 1;
+EXPORT_SYMBOL(device_sidechannel_restrict);
+
#ifdef CONFIG_INOTIFY_USER
#include <linux/inotify.h>
#endif
@@ -286,19 +297,19 @@ static struct ctl_table sysctl_base_table[] = {
};
#ifdef CONFIG_SCHED_DEBUG
-static int min_sched_granularity_ns = 100000; /* 100 usecs */
-static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
-static int min_wakeup_granularity_ns; /* 0 usecs */
-static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
+static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
+static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
+static int max_wakeup_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
#ifdef CONFIG_SMP
-static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
-static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
+static int min_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_NONE;
+static int max_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_END-1;
#endif /* CONFIG_SMP */
#endif /* CONFIG_SCHED_DEBUG */
#ifdef CONFIG_COMPACTION
-static int min_extfrag_threshold;
-static int max_extfrag_threshold = 1000;
+static int min_extfrag_threshold __read_only;
+static int max_extfrag_threshold __read_only = 1000;
#endif
static struct ctl_table kern_table[] = {
@@ -512,6 +523,15 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
+#ifdef CONFIG_USER_NS
+ {
+ .procname = "unprivileged_userns_clone",
+ .data = &unprivileged_userns_clone,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
@@ -853,6 +873,37 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
.extra2 = &two,
},
+#endif
+#if defined CONFIG_TTY
+ {
+ .procname = "tiocsti_restrict",
+ .data = &tiocsti_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
+ {
+ .procname = "device_sidechannel_restrict",
+ .data = &device_sidechannel_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#if IS_ENABLED(CONFIG_USB)
+ {
+ .procname = "deny_new_usb",
+ .data = &deny_new_usb,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
#endif
{
.procname = "ngroups_max",
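
Each of the new toggles surfaces under /proc/sys/kernel. An illustrative userspace reader (file name taken from the table above):

#include <stdio.h>

int main(void)
{
	int val;
	FILE *f = fopen("/proc/sys/kernel/tiocsti_restrict", "r");

	if (f && fscanf(f, "%d", &val) == 1)
		printf("tiocsti_restrict = %d\n", val);
	if (f)
		fclose(f);
	return 0;
}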
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 9fe525f410bf..6a85b0e1292e 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1624,7 +1624,7 @@ static inline void __run_timers(struct timer_base *base)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
-static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(void)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index c490f1e4313b..dd03bd39d7bf 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -24,6 +24,9 @@
#include <linux/projid.h>
#include <linux/fs_struct.h>
+/* sysctl */
+int unprivileged_userns_clone;
+
static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 62d0e25c054c..3953072277eb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -937,6 +937,7 @@ endmenu # "Debug lockups and hangs"
config PANIC_ON_OOPS
bool "Panic on Oops"
+ default y
help
Say Y here to enable the kernel to panic when it oopses. This
has the same effect as setting oops=panic on the kernel command
@@ -946,7 +947,7 @@ config PANIC_ON_OOPS
anything erroneous after an oops which could result in data
corruption or other issues.
- Say N if unsure.
+ Say Y if unsure.
config PANIC_ON_OOPS_VALUE
int
@@ -1319,6 +1320,7 @@ config DEBUG_BUGVERBOSE
config DEBUG_LIST
bool "Debug linked list manipulation"
depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
+ default y
help
Enable this to turn on extended checks in the linked-list
walking routines.
@@ -1932,6 +1934,7 @@ config MEMTEST
config BUG_ON_DATA_CORRUPTION
bool "Trigger a BUG when data corruption is detected"
select DEBUG_LIST
+ default y
help
Select this option if the kernel should BUG when it encounters
data corruption in kernel memory structures when they get checked
@@ -1952,7 +1955,7 @@ config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
depends on ARCH_HAS_DEVMEM_IS_ALLOWED
- default y if TILE || PPC
+ default y
---help---
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
@@ -1971,6 +1974,7 @@ config STRICT_DEVMEM
config IO_STRICT_DEVMEM
bool "Filter I/O access to /dev/mem"
depends on STRICT_DEVMEM
+ default y
---help---
If this option is disabled, you allow userspace (root) access to all
io-memory regardless of whether a driver is actively using that
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 86a709954f5a..6f15787fcb1b 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop)
}
EXPORT_SYMBOL(irq_poll_complete);
-static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(void)
{
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = irq_poll_budget;
diff --git a/lib/kobject.c b/lib/kobject.c
index 34f847252c02..4fda329de614 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -956,9 +956,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
static DEFINE_SPINLOCK(kobj_ns_type_lock);
-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __ro_after_init;
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
{
enum kobj_ns_type type = ops->type;
int error;
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 3d8295c85505..3fa3b3409d69 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -341,6 +341,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
{
int minlen = min_t(int, count, nla_len(src));
+ BUG_ON(minlen < 0);
+
memcpy(dest, nla_data(src), minlen);
if (count > minlen)
memset(dest + minlen, 0, count - minlen);
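
A short worked example of what the new BUG_ON catches (illustrative): a negative count propagates into minlen and would otherwise reach memcpy() as a huge size_t:

/*
 *   count = -1  ->  minlen = min_t(int, -1, nla_len(src)) = -1
 *   memcpy(dest, nla_data(src), (size_t)-1)    <- massive overcopy
 * The BUG_ON(minlen < 0) turns this into an immediate, loud failure.
 */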
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 86c3385b9eb3..c482070e379b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1591,7 +1591,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-int kptr_restrict __read_mostly;
+int kptr_restrict __read_mostly = 2;
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
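
The stricter default changes what %pK emits; objp below stands in for any kernel pointer. Illustrative effect of the three levels:

/*
 * kptr_restrict = 0: %pK prints the raw address
 * kptr_restrict = 1: zeroed unless the reader has CAP_SYSLOG
 * kptr_restrict = 2: always printed as zeros (the new default here)
 */
printk(KERN_INFO "ptr=%pK\n", objp);	/* now logs ptr=0000000000000000 */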
diff --git a/mm/Kconfig b/mm/Kconfig
index 59efbd3337e0..c070e14ec83d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -319,7 +319,8 @@ config KSM
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
depends on MMU
- default 4096
+ default 32768 if ARM || (ARM64 && COMPAT)
+ default 65536
help
This is the portion of low virtual memory which should be protected
from userspace allocation. Keeping a user from writing to low pages
diff --git a/mm/mmap.c b/mm/mmap.c
index 11f96fad5271..632e7f9a710e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -220,6 +220,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
+ /* properly handle unaligned min_brk as an empty heap */
+ if (min_brk & ~PAGE_MASK) {
+ if (brk == min_brk)
+ newbrk -= PAGE_SIZE;
+ if (mm->brk == min_brk)
+ oldbrk -= PAGE_SIZE;
+ }
if (oldbrk == newbrk)
goto set_brk;
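
The unaligned case deserves a worked example (illustrative, PAGE_SIZE = 4096): when min_brk is not page-aligned, brk == min_brk means an empty heap, but PAGE_ALIGN() would round both values into the next page and make the empty heap look one page long:

/*
 *   min_brk = 0x10010, brk = mm->brk = 0x10010   (empty heap)
 *   PAGE_ALIGN(0x10010) = 0x11000                (looks non-empty)
 *   newbrk -= PAGE_SIZE  ->  0x10000, oldbrk likewise, so the
 *   oldbrk == newbrk fast path again treats the heap as empty.
 */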
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1d7693c35424..8963a3b4d37c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -67,6 +67,7 @@
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
+#include <linux/random.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -98,6 +99,15 @@ int _node_numa_mem_[MAX_NUMNODES];
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+bool __meminitdata extra_latent_entropy;
+
+static int __init setup_extra_latent_entropy(char *str)
+{
+ extra_latent_entropy = true;
+ return 0;
+}
+early_param("extra_latent_entropy", setup_extra_latent_entropy);
+
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
@@ -1063,6 +1073,13 @@ static __always_inline bool free_pages_prepare(struct page *page,
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
+
+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE)) {
+ int i;
+ for (i = 0; i < (1 << order); i++)
+ clear_highpage(page + i);
+ }
+
arch_free_page(page, order);
kernel_poison_pages(page, 1 << order, 0);
kernel_map_pages(page, 1 << order, 0);
@@ -1278,6 +1295,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
__ClearPageReserved(p);
set_page_count(p, 0);
+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
+ unsigned long hash = 0;
+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
+ const unsigned long *data = lowmem_page_address(page);
+
+ for (index = 0; index < end; index++)
+ hash ^= hash + data[index];
+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
+ latent_entropy ^= hash;
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+#else
+ add_device_randomness((const void *)&hash, sizeof(hash));
+#endif
+ }
+
page_zone(page)->managed_pages += nr_pages;
set_page_refcounted(page);
__free_pages(page, order);
@@ -1718,8 +1750,8 @@ static inline int check_new_page(struct page *page)
static inline bool free_pages_prezeroed(void)
{
- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
- page_poisoning_enabled();
+ return IS_ENABLED(CONFIG_PAGE_SANITIZE) ||
+ (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled());
}
#ifdef CONFIG_DEBUG_VM
@@ -1776,6 +1808,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
post_alloc_hook(page, order, gfp_flags);
+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY)) {
+ for (i = 0; i < (1 << order); i++)
+ verify_zero_highpage(page + i);
+ }
+
if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
clear_highpage(page + i);
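
verify_zero_highpage(), used by the PAGE_SANITIZE_VERIFY path above, is presumably added elsewhere in this patch (e.g. include/linux/highmem.h). A minimal sketch under that assumption:

static inline void verify_zero_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);

	/* Any non-zero byte means a freed page was written to. */
	BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
	kunmap_atomic(kaddr);
}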
diff --git a/mm/slab.h b/mm/slab.h
index 485d9fbb8802..436461588804 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -311,7 +311,11 @@ static inline bool is_root_cache(struct kmem_cache *s)
static inline bool slab_equal_or_root(struct kmem_cache *s,
struct kmem_cache *p)
{
+#ifdef CONFIG_SLAB_HARDENED
+ return p == s;
+#else
return true;
+#endif
}
static inline const char *cache_name(struct kmem_cache *s)
@@ -363,18 +367,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
* to not do even the assignment. In that case, slab_equal_or_root
* will also be a constant.
*/
- if (!memcg_kmem_enabled() &&
+ if (!IS_ENABLED(CONFIG_SLAB_HARDENED) &&
+ !memcg_kmem_enabled() &&
!unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
return s;
page = virt_to_head_page(x);
+#ifdef CONFIG_SLAB_HARDENED
+ BUG_ON(!PageSlab(page));
+#endif
cachep = page->slab_cache;
if (slab_equal_or_root(cachep, s))
return cachep;
pr_err("%s: Wrong slab cache. %s but object is from %s\n",
__func__, s->name, cachep->name);
+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
+ BUG_ON(1);
+#else
WARN_ON_ONCE(1);
+#endif
return s;
}
@@ -399,7 +411,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
* back there or track user information then we can
* only use the space before that information.
*/
- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
+ if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY))
return s->inuse;
/*
* Else we can use all the padding etc for the allocation
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 65212caa1f2a..d8bf8a75f445 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -26,10 +26,10 @@
#include "slab.h"
-enum slab_state slab_state;
+enum slab_state slab_state __ro_after_init;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
-struct kmem_cache *kmem_cache;
+struct kmem_cache *kmem_cache __ro_after_init;
static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
@@ -49,7 +49,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
/*
* Merge control. If this is set then no merging of slab caches will occur.
*/
-static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
+static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
static int __init setup_slab_nomerge(char *str)
{
@@ -927,7 +927,7 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
* of two cache sizes there. The size of larger slabs can be determined using
* fls.
*/
-static s8 size_index[24] = {
+static s8 size_index[24] __ro_after_init = {
3, /* 8 */
4, /* 16 */
5, /* 24 */
diff --git a/mm/slub.c b/mm/slub.c
index 41c01690d116..591dd60d37f3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -125,6 +125,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#endif
}
+static inline bool has_sanitize(struct kmem_cache *s)
+{
+ return IS_ENABLED(CONFIG_SLAB_SANITIZE) && !(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON));
+}
+
+static inline bool has_sanitize_verify(struct kmem_cache *s)
+{
+ return IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && has_sanitize(s);
+}
+
void *fixup_red_left(struct kmem_cache *s, void *p)
{
if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
@@ -297,6 +307,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
+#ifdef CONFIG_SLAB_CANARY
+static inline unsigned long *get_canary(struct kmem_cache *s, void *object)
+{
+ if (s->offset)
+ return object + s->offset + sizeof(void *);
+ return object + s->inuse;
+}
+
+static inline unsigned long get_canary_value(const void *canary, unsigned long value)
+{
+ return (value ^ (unsigned long)canary) & CANARY_MASK;
+}
+
+static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value)
+{
+ unsigned long *canary = get_canary(s, object);
+ *canary = get_canary_value(canary, value);
+}
+
+static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value)
+{
+ unsigned long *canary = get_canary(s, object);
+ BUG_ON(*canary != get_canary_value(canary, value));
+}
+#else
+#define set_canary(s, object, value)
+#define check_canary(s, object, value)
+#endif
+
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
for (__p = fixup_red_left(__s, __addr); \
@@ -484,13 +523,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
* Debug settings:
*/
#if defined(CONFIG_SLUB_DEBUG_ON)
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
+static int slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS;
#else
-static int slub_debug;
+static int slub_debug __ro_after_init;
#endif
-static char *slub_debug_slabs;
-static int disable_higher_order_debug;
+static char *slub_debug_slabs __ro_after_init;
+static int disable_higher_order_debug __ro_after_init;
/*
* slub is about to manipulate internal object metadata. This memory lies
@@ -550,6 +589,9 @@ static struct track *get_track(struct kmem_cache *s, void *object,
else
p = object + s->inuse;
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
+ p = (void *)p + sizeof(void *);
+
return p + alloc;
}
@@ -688,6 +730,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
else
off = s->inuse;
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
+ off += sizeof(void *);
+
if (s->flags & SLAB_STORE_USER)
off += 2 * sizeof(struct track);
@@ -817,6 +862,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
/* Freepointer is placed after the object. */
off += sizeof(void *);
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
+ off += sizeof(void *);
+
if (s->flags & SLAB_STORE_USER)
/* We also have user information there */
off += 2 * sizeof(struct track);
@@ -1416,8 +1464,9 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
+ set_canary(s, object, s->random_inactive);
kasan_init_slab_obj(s, object);
- if (unlikely(s->ctor)) {
+ if (unlikely(s->ctor) && !has_sanitize_verify(s)) {
kasan_unpoison_object_data(s, object);
s->ctor(object);
kasan_poison_object_data(s, object);
@@ -2717,9 +2766,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
stat(s, ALLOC_FASTPATH);
}
- if (unlikely(gfpflags & __GFP_ZERO) && object)
+ if (has_sanitize_verify(s) && object) {
+ size_t offset = s->offset ? 0 : sizeof(void *);
+ BUG_ON(memchr_inv(object + offset, 0, s->object_size - offset));
+ if (s->ctor)
+ s->ctor(object);
+ if (unlikely(gfpflags & __GFP_ZERO) && offset)
+ memset(object, 0, sizeof(void *));
+ } else if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, s->object_size);
+ if (object) {
+ check_canary(s, object, s->random_inactive);
+ set_canary(s, object, s->random_active);
+ }
+
slab_post_alloc_hook(s, gfpflags, 1, &object);
return object;
@@ -2926,6 +2987,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
void *tail_obj = tail ? : head;
struct kmem_cache_cpu *c;
unsigned long tid;
+ bool sanitize = has_sanitize(s);
+
+ if (IS_ENABLED(CONFIG_SLAB_CANARY) || sanitize) {
+ __maybe_unused int offset = s->offset ? 0 : sizeof(void *);
+ void *x = head;
+
+ while (1) {
+ check_canary(s, x, s->random_active);
+ set_canary(s, x, s->random_inactive);
+
+ if (sanitize) {
+ memset(x + offset, 0, s->object_size - offset);
+ if (!IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && s->ctor)
+ s->ctor(x);
+ }
+ if (x == tail_obj)
+ break;
+ x = get_freepointer(s, x);
+ }
+ }
+
redo:
/*
* Determine the currently cpus per cpu slab.
@@ -3104,7 +3186,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{
struct kmem_cache_cpu *c;
- int i;
+ int i, k;
/* memcg and kmem_cache debug support */
s = slab_pre_alloc_hook(s, flags);
@@ -3141,13 +3223,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
local_irq_enable();
/* Clear memory outside IRQ disabled fastpath loop */
- if (unlikely(flags & __GFP_ZERO)) {
+ if (has_sanitize_verify(s)) {
+ int j;
+
+ for (j = 0; j < i; j++) {
+ size_t offset = s->offset ? 0 : sizeof(void *);
+ BUG_ON(memchr_inv(p[j] + offset, 0, s->object_size - offset));
+ if (s->ctor)
+ s->ctor(p[j]);
+ if (unlikely(flags & __GFP_ZERO) && offset)
+ memset(p[j], 0, sizeof(void *));
+ }
+ } else if (unlikely(flags & __GFP_ZERO)) {
int j;
for (j = 0; j < i; j++)
memset(p[j], 0, s->object_size);
}
+ for (k = 0; k < i; k++) {
+ check_canary(s, p[k], s->random_inactive);
+ set_canary(s, p[k], s->random_active);
+ }
+
/* memcg and kmem_cache debug support */
slab_post_alloc_hook(s, flags, size, p);
return i;
@@ -3179,9 +3277,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
* and increases the number of allocations possible without having to
* take the list_lock.
*/
-static int slub_min_order;
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
-static int slub_min_objects;
+static int slub_min_order __ro_after_init;
+static int slub_max_order __ro_after_init = PAGE_ALLOC_COSTLY_ORDER;
+static int slub_min_objects __ro_after_init;
/*
* Calculate the order of allocation given an slab object size.
@@ -3351,6 +3449,7 @@ static void early_kmem_cache_node_alloc(int node)
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
+ set_canary(kmem_cache_node, n, kmem_cache_node->random_active);
kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
GFP_KERNEL);
init_kmem_cache_node(n);
@@ -3507,6 +3606,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
size += sizeof(void *);
}
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
+ size += sizeof(void *);
+
#ifdef CONFIG_SLUB_DEBUG
if (flags & SLAB_STORE_USER)
/*
@@ -3577,6 +3679,10 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
#ifdef CONFIG_SLAB_FREELIST_HARDENED
s->random = get_random_long();
#endif
+#ifdef CONFIG_SLAB_CANARY
+ s->random_active = get_random_long();
+ s->random_inactive = get_random_long();
+#endif
if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
s->reserved = sizeof(struct rcu_head);
@@ -3841,6 +3947,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
offset -= s->red_left_pad;
}
+ check_canary(s, (void *)ptr - offset, s->random_active);
+
/* Allow address range falling entirely within object size. */
if (offset <= object_size && n <= object_size - offset)
return NULL;
@@ -3859,7 +3967,11 @@ static size_t __ksize(const void *object)
page = virt_to_head_page(object);
if (unlikely(!PageSlab(page))) {
+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
+ BUG_ON(!PageCompound(page));
+#else
WARN_ON(!PageCompound(page));
+#endif
return PAGE_SIZE << compound_order(page);
}
@@ -4724,7 +4836,7 @@ enum slab_stat_type {
#define SO_TOTAL (1 << SL_TOTAL)
#ifdef CONFIG_MEMCG
-static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
+static bool memcg_sysfs_enabled __ro_after_init = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
static int __init setup_slub_memcg_sysfs(char *str)
{
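
Taken together, the SLUB changes reserve one extra word per object for the canary. An illustrative layout for the case where the free pointer is stored past the object (s->offset != 0):

/*
 *   [ red zone ][ object data ][ free pointer ][ canary ][ track info ]
 *
 * get_canary() points at the word after the free pointer (or at
 * object + s->inuse when the free pointer lives inside the object);
 * get_canary_value() XORs in the canary's own address, so the stored
 * value differs per slot even with the same random seed.
 */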
diff --git a/mm/swap.c b/mm/swap.c
index a77d68f2c1b6..d1f1d75f4d1f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -92,6 +92,13 @@ static void __put_compound_page(struct page *page)
if (!PageHuge(page))
__page_cache_release(page);
dtor = get_compound_page_dtor(page);
+ if (!PageHuge(page))
+ BUG_ON(dtor != free_compound_page
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ && dtor != free_transhuge_page
+#endif
+ );
+
(*dtor)(page);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ca771f2f25b..6da2c9c3e6a5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4095,7 +4095,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
-static __latent_entropy void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -5609,7 +5609,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
return work;
}
-static __latent_entropy void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index f48fe6fc7e8c..d78c52835c08 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -261,6 +261,7 @@ config IP_PIMSM_V2
config SYN_COOKIES
bool "IP: TCP syncookie support"
+ default y
---help---
Normal TCP/IP networking is open to an attack known as "SYN
flooding". This denial-of-service attack prevents legitimate remote
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 54deaa1066cf..211f97bd5ee3 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -37,6 +37,7 @@ static int vmlinux_section_warnings = 1;
static int warn_unresolved = 0;
/* How a symbol is exported */
static int sec_mismatch_count = 0;
+static int writable_fptr_count = 0;
static int sec_mismatch_verbose = 1;
static int sec_mismatch_fatal = 0;
/* ignore missing files */
@@ -965,6 +966,7 @@ enum mismatch {
ANY_EXIT_TO_ANY_INIT,
EXPORT_TO_INIT_EXIT,
EXTABLE_TO_NON_TEXT,
+ DATA_TO_TEXT
};
/**
@@ -1091,6 +1093,12 @@ static const struct sectioncheck sectioncheck[] = {
.good_tosec = {ALL_TEXT_SECTIONS , NULL},
.mismatch = EXTABLE_TO_NON_TEXT,
.handler = extable_mismatch_handler,
+},
+/* Do not reference code from writable data */
+{
+ .fromsec = { DATA_SECTIONS, NULL },
+ .bad_tosec = { ALL_TEXT_SECTIONS, NULL },
+ .mismatch = DATA_TO_TEXT
}
};
@@ -1240,10 +1248,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
continue;
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
- if (sym->st_value == addr)
- return sym;
/* Find a symbol nearby - addr are maybe negative */
d = sym->st_value - addr;
+ if (d == 0)
+ return sym;
if (d < 0)
d = addr - sym->st_value;
if (d < distance) {
@@ -1402,7 +1410,11 @@ static void report_sec_mismatch(const char *modname,
char *prl_from;
char *prl_to;
- sec_mismatch_count++;
+ if (mismatch->mismatch == DATA_TO_TEXT)
+ writable_fptr_count++;
+ else
+ sec_mismatch_count++;
+
if (!sec_mismatch_verbose)
return;
@@ -1526,6 +1538,14 @@ static void report_sec_mismatch(const char *modname,
fatal("There's a special handler for this mismatch type, "
"we should never get here.");
break;
+ case DATA_TO_TEXT:
+#if 0
+ fprintf(stderr,
+ "The %s %s:%s references\n"
+ "the %s %s:%s%s\n",
+ from, fromsec, fromsym, to, tosec, tosym, to_p);
+#endif
+ break;
}
fprintf(stderr, "\n");
}
@@ -2539,6 +2559,14 @@ int main(int argc, char **argv)
}
}
free(buf.p);
+ if (writable_fptr_count) {
+ if (!sec_mismatch_verbose) {
+ warn("modpost: Found %d writable function pointer(s).\n"
+ "To see full details build your kernel with:\n"
+ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
+ writable_fptr_count);
+ }
+ }
return err;
}
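
The sort of pattern the new DATA_TO_TEXT check counts, sketched (names hypothetical):

/* A function pointer in writable .data referencing .text: */
static void my_handler(void)
{
}

void (*handler_ptr)(void) = my_handler;	/* flagged as a writable fptr */

/*
 * A const-qualified pointer lands in .rodata and is not counted:
 *   void (* const safe_handler)(void) = my_handler;
 */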
diff --git a/security/Kconfig b/security/Kconfig
index 87f2a6f842fd..7bdbb7edf5bf 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -8,7 +8,7 @@ source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
- default n
+ default y
help
This enforces restrictions on unprivileged users reading the kernel
syslog via dmesg(8).
@@ -18,10 +18,34 @@ config SECURITY_DMESG_RESTRICT
If you are unsure how to answer this question, answer N.
+config SECURITY_PERF_EVENTS_RESTRICT
+ bool "Restrict unprivileged use of performance events"
+ depends on PERF_EVENTS
+ default y
+ help
+ If you say Y here, the kernel.perf_event_paranoid sysctl
+ will be set to 3 by default, and no unprivileged use of the
+ perf_event_open syscall will be permitted unless the
+ sysctl value is changed.
+
+config SECURITY_TIOCSTI_RESTRICT
+ bool "Restrict unprivileged use of tiocsti command injection"
+ default y
+ help
+ This enforces restrictions on unprivileged users injecting commands
+ into other processes which share a tty session using the TIOCSTI
+ ioctl. This option makes TIOCSTI use require CAP_SYS_ADMIN.
+
+ If this option is not selected, no restrictions will be enforced
+ unless the tiocsti_restrict sysctl is explicitly set to (1).
+
+ If you are unsure how to answer this question, answer Y.
+
config SECURITY
bool "Enable different security models"
depends on SYSFS
depends on MULTIUSER
+ default y
help
This allows you to choose different security modules to be
configured into your kernel.
@@ -48,6 +72,7 @@ config SECURITYFS
config SECURITY_NETWORK
bool "Socket and Networking Security Hooks"
depends on SECURITY
+ default y
help
This enables the socket and networking security hooks.
If enabled, a security module can use these hooks to
@@ -155,6 +180,7 @@ config HARDENED_USERCOPY
depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
select BUG
imply STRICT_DEVMEM
+ default y
help
This option checks for obviously wrong memory regions when
copying memory to/from the kernel (via copy_to_user() and
@@ -178,10 +204,36 @@ config HARDENED_USERCOPY_PAGESPAN
config FORTIFY_SOURCE
bool "Harden common str/mem functions against buffer overflows"
depends on ARCH_HAS_FORTIFY_SOURCE
+ default y
help
Detect overflows of buffers in common string and memory functions
where the compiler can determine and validate the buffer sizes.
+config FORTIFY_SOURCE_STRICT_STRING
+ bool "Harden common functions against buffer overflows"
+ depends on FORTIFY_SOURCE
+ depends on EXPERT
+ help
+ Perform stricter overflow checks catching overflows within objects
+ for common C string functions rather than only between objects.
+
+ This is not yet intended for production use, only bug finding.
+
+config PAGE_SANITIZE
+ bool "Sanitize pages"
+ default y
+ help
+ Zero fill page allocations on free, reducing the lifetime of
+ sensitive data and helping to mitigate use-after-free bugs.
+
+config PAGE_SANITIZE_VERIFY
+ bool "Verify sanitized pages"
+ depends on PAGE_SANITIZE
+ default y
+ help
+ Verify that newly allocated pages are zeroed to detect
+ write-after-free bugs.
+
config STATIC_USERMODEHELPER
bool "Force all usermode helper calls through a single binary"
help
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 8af7a690eb40..6539694b0fd3 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -2,7 +2,7 @@ config SECURITY_SELINUX
bool "NSA SELinux Support"
depends on SECURITY_NETWORK && AUDIT && NET && INET
select NETWORK_SECMARK
- default n
+ default y
help
This selects NSA Security-Enhanced Linux (SELinux).
You will also need a policy configuration and a labeled filesystem.
@@ -79,23 +79,3 @@ config SECURITY_SELINUX_AVC_STATS
This option collects access vector cache statistics to
/selinux/avc/cache_stats, which may be monitored via
tools such as avcstat.
-
-config SECURITY_SELINUX_CHECKREQPROT_VALUE
- int "NSA SELinux checkreqprot default value"
- depends on SECURITY_SELINUX
- range 0 1
- default 0
- help
- This option sets the default value for the 'checkreqprot' flag
- that determines whether SELinux checks the protection requested
- by the application or the protection that will be applied by the
- kernel (including any implied execute for read-implies-exec) for
- mmap and mprotect calls. If this option is set to 0 (zero),
- SELinux will default to checking the protection that will be applied
- by the kernel. If this option is set to 1 (one), SELinux will
- default to checking the protection requested by the application.
- The checkreqprot flag may be changed from the default via the
- 'checkreqprot=' boot parameter. It may also be changed at runtime
- via /selinux/checkreqprot if authorized by policy.
-
- If you are unsure how to answer this question, answer 0.
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 1649cd18eb0b..067f35559aa7 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -150,6 +150,6 @@ struct pkey_security_struct {
u32 sid; /* SID of pkey */
};
-extern unsigned int selinux_checkreqprot;
+extern const unsigned int selinux_checkreqprot;
#endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 00eed842c491..8f7b8d7e6f91 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -41,16 +41,7 @@
#include "objsec.h"
#include "conditional.h"
-unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
-
-static int __init checkreqprot_setup(char *str)
-{
- unsigned long checkreqprot;
- if (!kstrtoul(str, 0, &checkreqprot))
- selinux_checkreqprot = checkreqprot ? 1 : 0;
- return 1;
-}
-__setup("checkreqprot=", checkreqprot_setup);
+const unsigned int selinux_checkreqprot;
static DEFINE_MUTEX(sel_mutex);
@@ -610,10 +601,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
return PTR_ERR(page);
length = -EINVAL;
- if (sscanf(page, "%u", &new_value) != 1)
+ if (sscanf(page, "%u", &new_value) != 1 || new_value)
goto out;
- selinux_checkreqprot = new_value ? 1 : 0;
length = count;
out:
kfree(page);
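
Net effect at the selinuxfs boundary (illustrative): checkreqprot can now only ever be confirmed as 0:

/*
 *   echo 0 > /sys/fs/selinux/checkreqprot   ->  accepted (a no-op)
 *   echo 1 > /sys/fs/selinux/checkreqprot   ->  write fails with -EINVAL
 * Together with the const qualifier on selinux_checkreqprot, the value
 * is pinned to checking the protection the kernel will actually apply.
 */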
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
index 96b27405558a..485c1b85c325 100644
--- a/security/yama/Kconfig
+++ b/security/yama/Kconfig
@@ -1,7 +1,7 @@
config SECURITY_YAMA
bool "Yama support"
depends on SECURITY
- default n
+ default y
help
This selects Yama, which extends DAC support with additional
system-wide security settings beyond regular Linux discretionary