Pull x86 kernel address space randomization support from Peter Anvin:
 "This enables kernel address space randomization for x86"

* 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kaslr: Clarify RANDOMIZE_BASE_MAX_OFFSET
  x86, kaslr: Remove unused including <linux/version.h>
  x86, kaslr: Use char array to gain sizeof sanity
  x86, kaslr: Add a circular multiply for better bit diffusion
  x86, kaslr: Mix entropy sources together as needed
  x86/relocs: Add percpu fixup for GNU ld 2.23
  x86, boot: Rename get_flags() and check_flags() to *_cpuflags()
  x86, kaslr: Raise the maximum virtual address to -1 GiB on x86_64
  x86, kaslr: Report kernel offset on panic
  x86, kaslr: Select random position from e820 maps
  x86, kaslr: Provide randomness functions
  x86, kaslr: Return location from decompress_kernel
  x86, boot: Move CPU flags out of cpucheck
  x86, relocs: Add more per-cpu gold special cases
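For orientation before the diff: aslr.c below enumerates CONFIG_PHYSICAL_ALIGN-aligned load slots under CONFIG_RANDOMIZE_BASE_MAX_OFFSET and picks one using the mixed entropy sources above. A back-of-envelope sketch of the slot arithmetic, assuming illustrative values of 2 MiB alignment and a 1 GiB maximum offset (both come from Kconfig, so treat the numbers as examples, not authoritative):

#include <stdio.h>

/* Illustrative Kconfig values, not authoritative: */
#define PHYSICAL_ALIGN            (2UL << 20)  /* 2 MiB */
#define RANDOMIZE_BASE_MAX_OFFSET (1UL << 30)  /* 1 GiB */

int main(void)
{
	unsigned long slots = RANDOMIZE_BASE_MAX_OFFSET / PHYSICAL_ALIGN;
	int bits = 0;

	while ((1UL << bits) < slots)
		bits++;

	/* 512 slots -> at most ~9 bits of base-address entropy. */
	printf("%lu slots, ~%d bits of entropy\n", slots, bits);
	return 0;
}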
commit f4bcd8ccdd

arch/x86/boot/compressed/aslr.c
@@ -0,0 +1,316 @@
#include "misc.h"

#ifdef CONFIG_RANDOMIZE_BASE
#include <asm/msr.h>
#include <asm/archrandom.h>
#include <asm/e820.h>

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

#define I8254_PORT_CONTROL	0x43
#define I8254_PORT_COUNTER0	0x40
#define I8254_CMD_READBACK	0xC0
#define I8254_SELECT_COUNTER0	0x02
#define I8254_STATUS_NOTREADY	0x40
static inline u16 i8254(void)
{
	u16 status, timer;

	do {
		outb(I8254_PORT_CONTROL,
		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
		status = inb(I8254_PORT_COUNTER0);
		timer  = inb(I8254_PORT_COUNTER0);
		timer |= inb(I8254_PORT_COUNTER0) << 8;
	} while (status & I8254_STATUS_NOTREADY);

	return timer;
}

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_random_boot(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, real_mode, sizeof(*real_mode));

	return hash;
}

static unsigned long get_random_long(void)
{
#ifdef CONFIG_X86_64
	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
#else
	const unsigned long mix_const = 0x3f39e593UL;
#endif
	unsigned long raw, random = get_random_boot();
	bool use_i8254 = true;

	debug_putstr("KASLR using");

	if (has_cpuflag(X86_FEATURE_RDRAND)) {
		debug_putstr(" RDRAND");
		if (rdrand_long(&raw)) {
			random ^= raw;
			use_i8254 = false;
		}
	}

	if (has_cpuflag(X86_FEATURE_TSC)) {
		debug_putstr(" RDTSC");
		rdtscll(raw);

		random ^= raw;
		use_i8254 = false;
	}

	if (use_i8254) {
		debug_putstr(" i8254");
		random ^= i8254();
	}

	/* Circular multiply for better bit diffusion */
	asm("mul %3"
	    : "=a" (random), "=d" (raw)
	    : "a" (random), "rm" (mix_const));
	random += raw;

	debug_putstr("...\n");

	return random;
}

struct mem_vector {
	unsigned long start;
	unsigned long size;
};

#define MEM_AVOID_MAX 5
struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
{
	/* Item at least partially before region. */
	if (item->start < region->start)
		return false;
	/* Item at least partially after region. */
	if (item->start + item->size > region->start + region->size)
		return false;
	return true;
}

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}

static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output, unsigned long output_size)
{
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	unsigned long unsafe, unsafe_len;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression (see calculations at top of misc.c).
	 */
	unsafe_len = (output_size >> 12) + 32768 + 18;
	unsafe = (unsigned long)input + input_size - unsafe_len;
	mem_avoid[0].start = unsafe;
	mem_avoid[0].size = unsafe_len;

	/* Avoid initrd. */
	initrd_start  = (u64)real_mode->ext_ramdisk_image << 32;
	initrd_start |= real_mode->hdr.ramdisk_image;
	initrd_size  = (u64)real_mode->ext_ramdisk_size << 32;
	initrd_size |= real_mode->hdr.ramdisk_size;
	mem_avoid[1].start = initrd_start;
	mem_avoid[1].size = initrd_size;

	/* Avoid kernel command line. */
	cmd_line  = (u64)real_mode->ext_cmd_line_ptr << 32;
	cmd_line |= real_mode->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[2].start = cmd_line;
	mem_avoid[2].size = cmd_line_size;

	/* Avoid heap memory. */
	mem_avoid[3].start = (unsigned long)free_mem_ptr;
	mem_avoid[3].size = BOOT_HEAP_SIZE;

	/* Avoid stack memory. */
	mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
	mem_avoid[4].size = BOOT_STACK_SIZE;
}

/* Does this memory vector overlap a known avoided area? */
bool mem_avoid_overlap(struct mem_vector *img)
{
	int i;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]))
			return true;
	}

	return false;
}

unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
unsigned long slot_max = 0;

static void slots_append(unsigned long addr)
{
	/* Overflowing the slots list should be impossible. */
	if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
			CONFIG_PHYSICAL_ALIGN)
		return;

	slots[slot_max++] = addr;
}

static unsigned long slots_fetch_random(void)
{
	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	return slots[get_random_long() % slot_max];
}

static void process_e820_entry(struct e820entry *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, img;

	/* Skip non-RAM entries. */
	if (entry->type != E820_RAM)
		return;

	/* Ignore entries entirely above our maximum. */
	if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->addr + entry->size < minimum)
		return;

	region.start = entry->addr;
	region.size = entry->size;

	/* Potentially raise address to minimum location. */
	if (region.start < minimum)
		region.start = minimum;

	/* Potentially raise address to meet alignment requirements. */
	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

	/* Did we raise the address above the bounds of this e820 region? */
	if (region.start > entry->addr + entry->size)
		return;

	/* Reduce size by any delta from the original address. */
	region.size -= region.start - entry->addr;

	/* Reduce maximum size to fit end of image within maximum limit. */
	if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
		region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;

	/* Walk each aligned slot and check for avoided areas. */
	for (img.start = region.start, img.size = image_size ;
	     mem_contains(&region, &img) ;
	     img.start += CONFIG_PHYSICAL_ALIGN) {
		if (mem_avoid_overlap(&img))
			continue;
		slots_append(img.start);
	}
}

static unsigned long find_random_addr(unsigned long minimum,
				      unsigned long size)
{
	int i;
	unsigned long addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < real_mode->e820_entries; i++) {
		process_e820_entry(&real_mode->e820_map[i], minimum, size);
	}

	return slots_fetch_random();
}

unsigned char *choose_kernel_location(unsigned char *input,
				      unsigned long input_size,
				      unsigned char *output,
				      unsigned long output_size)
{
	unsigned long choice = (unsigned long)output;
	unsigned long random;

	if (cmdline_find_option_bool("nokaslr")) {
		debug_putstr("KASLR disabled...\n");
		goto out;
	}

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init((unsigned long)input, input_size,
		       (unsigned long)output, output_size);

	/* Walk e820 and find a random address. */
	random = find_random_addr(choice, output_size);
	if (!random) {
		debug_putstr("KASLR could not find suitable E820 region...\n");
		goto out;
	}

	/* Always enforce the minimum. */
	if (random < choice)
		goto out;

	choice = random;
out:
	return (unsigned char *)choice;
}

#endif /* CONFIG_RANDOMIZE_BASE */
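For context, a sketch of the expected call site (illustrative, based on the "x86, kaslr: Return location from decompress_kernel" change; the misc.c side is not part of the hunks shown here):

	/* In arch/x86/boot/compressed/misc.c (sketch, not this diff):
	 * ask aslr.c for a relocated output address before decompressing;
	 * it returns the original "output" when KASLR is disabled or no
	 * suitable slot is found.
	 */
	output = choose_kernel_location(input_data, input_len,
					output, output_size);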
arch/x86/boot/compressed/cpuflags.c
@@ -0,0 +1,12 @@
#ifdef CONFIG_RANDOMIZE_BASE

#include "../cpuflags.c"

bool has_cpuflag(int flag)
{
	get_cpuflags();

	return test_bit(flag, cpu.flags);
}

#endif
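test_bit() here is the boot stub's local bitops helper; conceptually the lookup behind has_cpuflag() reduces to indexing 32-bit capability words (a simplified model, not the kernel's actual bitops.h code):

/* Each X86_FEATURE_* value encodes word*32 + bit within cpu.flags[]. */
static int test_bit_model(int nr, const u32 *flags)
{
	return (flags[nr >> 5] >> (nr & 31)) & 1;
}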
arch/x86/boot/cpuflags.c
@@ -0,0 +1,104 @@
#include <linux/types.h>
#include "bitops.h"

#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"

struct cpu_features cpu;
u32 cpu_vendor[3];

static bool loaded_flags;

static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	unsigned long cr0;

	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("mov %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

int has_eflag(unsigned long mask)
{
	unsigned long f0, f1;

	asm volatile("pushf	\n\t"
		     "pushf	\n\t"
		     "pop %0	\n\t"
		     "mov %0,%1	\n\t"
		     "xor %2,%1	\n\t"
		     "push %1	\n\t"
		     "popf	\n\t"
		     "pushf	\n\t"
		     "pop %1	\n\t"
		     "popf"
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	return !!((f0^f1) & mask);
}

/* Handle x86_32 PIC using ebx. */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif

static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
{
	asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif	\n\t"
		     "cpuid					\n\t"
		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif	\n\t"
		     : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
		     : "a" (id)
	);
}

void get_cpuflags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;
	u32 ignored;

	if (loaded_flags)
		return;
	loaded_flags = true;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
		      &cpu_vendor[1]);

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
			      &cpu.flags[0]);
			cpu.level = (tfms >> 8) & 15;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

		cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
		      &ignored);

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
			      &cpu.flags[1]);
		}
	}
}
arch/x86/boot/cpuflags.h
@@ -0,0 +1,19 @@
#ifndef BOOT_CPUFLAGS_H
#define BOOT_CPUFLAGS_H

#include <asm/cpufeature.h>
#include <asm/processor-flags.h>

struct cpu_features {
	int level;		/* Family, or 64 for x86-64 */
	int model;
	u32 flags[NCAPINTS];
};

extern struct cpu_features cpu;
extern u32 cpu_vendor[3];

int has_eflag(unsigned long mask);
void get_cpuflags(void);

#endif
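A usage sketch tying the pieces together (hypothetical caller; the flag-to-word mapping follows <asm/cpufeature.h>, where CPUID leaf 1 EDX/ECX populate capability words 0/4 and leaf 0x80000001 EDX/ECX populate words 1/6, matching the cpuid() calls in get_cpuflags() above):

	/* Hypothetical caller inside the boot stub: */
	get_cpuflags();		/* fills cpu.flags[] once */
	if (test_bit(X86_FEATURE_RDRAND, cpu.flags)) {
		/* CPUID.1:ECX bit 30 -> word 4 of cpu.flags */
	}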