@@ -261,6 +261,7 @@
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
+#include <crypto/chacha20.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
@ -413,6 +414,34 @@ static struct fasync_struct *fasync;
static DEFINE_SPINLOCK ( random_ready_list_lock ) ;
static DEFINE_SPINLOCK ( random_ready_list_lock ) ;
static LIST_HEAD ( random_ready_list ) ;
static LIST_HEAD ( random_ready_list ) ;
struct crng_state {
__u32 state [ 16 ] ;
unsigned long init_time ;
spinlock_t lock ;
} ;
struct crng_state primary_crng = {
. lock = __SPIN_LOCK_UNLOCKED ( primary_crng . lock ) ,
} ;
/*
* crng_init = 0 - - > Uninitialized
* 1 - - > Initialized
* 2 - - > Initialized from input_pool
*
* crng_init is protected by primary_crng - > lock , and only increases
* its value ( from 0 - > 1 - > 2 ) .
*/
static int crng_init = 0 ;
# define crng_ready() (likely(crng_init > 0))
static int crng_init_cnt = 0 ;
# define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng ( struct crng_state * crng ,
__u8 out [ CHACHA20_BLOCK_SIZE ] ) ;
static void _crng_backtrack_protect ( struct crng_state * crng ,
__u8 tmp [ CHACHA20_BLOCK_SIZE ] , int used ) ;
static void process_random_ready_list ( void ) ;
/**********************************************************************
/**********************************************************************
*
*
* OS independent entropy store . Here are the functions which handle
* OS independent entropy store . Here are the functions which handle
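For orientation, the 16 state words declared above follow the classic ChaCha20 block layout, which is what chacha20_block() consumes. The enum below is an illustration added for this writeup only; its names do not exist in the kernel.

        /* Illustrative only: word offsets into crng_state.state[]. */
        enum crng_state_layout {
                CRNG_CONSTANT_BASE = 0,   /* state[0..3]:   "expand 32-byte k" constants       */
                CRNG_KEY_BASE      = 4,   /* state[4..11]:  256-bit key; reseeding XORs in here */
                CRNG_COUNTER_BASE  = 12,  /* state[12..13]: 64-bit block counter                */
                CRNG_NONCE_BASE    = 14,  /* state[14..15]: nonce; state[14] also absorbs arch entropy */
        };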
@@ -442,10 +471,15 @@ struct entropy_store {
         __u8 last_data[EXTRACT_SIZE];
 };
 
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+                               size_t nbytes, int min, int rsvd);
+static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
+                                size_t nbytes, int fips);
+
+static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
+
 static void push_to_pool(struct work_struct *work);
 static __u32 input_pool_data[INPUT_POOL_WORDS];
 static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
 
 static struct entropy_store input_pool = {
         .poolinfo = &poolinfo_table[0],
@@ -466,16 +500,6 @@ static struct entropy_store blocking_pool = {
                                         push_to_pool),
 };
 
-static struct entropy_store nonblocking_pool = {
-        .poolinfo = &poolinfo_table[1],
-        .name = "nonblocking",
-        .pull = &input_pool,
-        .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
-        .pool = nonblocking_pool_data,
-        .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
-                                        push_to_pool),
-};
-
 static __u32 const twist_table[8] = {
         0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
         0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -678,12 +702,6 @@ retry:
         if (!r->initialized && r->entropy_total > 128) {
                 r->initialized = 1;
                 r->entropy_total = 0;
-                if (r == &nonblocking_pool) {
-                        prandom_reseed_late();
-                        process_random_ready_list();
-                        wake_up_all(&urandom_init_wait);
-                        pr_notice("random: %s pool is initialized\n", r->name);
-                }
         }
 
         trace_credit_entropy_bits(r->name, nbits,
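With the non-blocking pool gone, the "pool is initialized" notifications no longer fire here: process_random_ready_list() and the wake-up of urandom/getrandom waiters are now driven by the CRNG state machine instead, from crng_fast_load() (crng_init 0 -> 1) and crng_reseed() (crng_init 1 -> 2) in the hunk below.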
@@ -693,47 +711,264 @@ retry:
         if (r == &input_pool) {
                 int entropy_bits = entropy_count >> ENTROPY_SHIFT;
 
+                if (crng_init < 2 && entropy_bits >= 128) {
+                        crng_reseed(&primary_crng, r);
+                        entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
+                }
+
                 /* should we wake readers? */
                 if (entropy_bits >= random_read_wakeup_bits) {
                         wake_up_interruptible(&random_read_wait);
                         kill_fasync(&fasync, SIGIO, POLL_IN);
                 }
                 /* If the input pool is getting full, send some
-                 * entropy to the two output pools, flipping back and
-                 * forth between them, until the output pools are 75%
-                 * full.
+                 * entropy to the blocking pool until it is 75% full.
                  */
                 if (entropy_bits > random_write_wakeup_bits &&
                     r->initialized &&
                     r->entropy_total >= 2*random_read_wakeup_bits) {
-                        static struct entropy_store *last = &blocking_pool;
                         struct entropy_store *other = &blocking_pool;
 
-                        if (last == &blocking_pool)
-                                other = &nonblocking_pool;
                         if (other->entropy_count <=
-                            3 * other->poolinfo->poolfracbits / 4)
-                                last = other;
-                        if (last->entropy_count <=
-                            3 * last->poolinfo->poolfracbits / 4) {
-                                schedule_work(&last->push_work);
+                            3 * other->poolinfo->poolfracbits / 4) {
+                                schedule_work(&other->push_work);
                                 r->entropy_total = 0;
                         }
                 }
         }
 }
 
-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
 {
         const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
 
+        if (nbits < 0)
+                return -EINVAL;
+
         /* Cap the value to avoid overflows */
         nbits = min(nbits, nbits_max);
-        nbits = max(nbits, -nbits_max);
 
         credit_entropy_bits(r, nbits);
+        return 0;
 }
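Two separate thresholds now gate initialization: crng_fast_load() flips crng_init to 1 after mixing 2 * CHACHA20_KEY_SIZE = 64 bytes of early event data, while the block added above performs the first pool-backed reseed as soon as the input pool has at least 128 bits of credited entropy, which is what takes crng_init to 2.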
+
+/*********************************************************************
+ *
+ * CRNG using CHACHA20
+ *
+ *********************************************************************/
+
+#define CRNG_RESEED_INTERVAL (300*HZ)
+
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+
+#ifdef CONFIG_NUMA
+/*
+ * Hack to deal with crazy userspace programs when they are all trying
+ * to access /dev/urandom in parallel.  The programs are almost
+ * certainly doing something terribly wrong, but we'll work around
+ * their brain damage.
+ */
+static struct crng_state **crng_node_pool __read_mostly;
+#endif
+
+static void crng_initialize(struct crng_state *crng)
+{
+        int i;
+        unsigned long rv;
+
+        memcpy(&crng->state[0], "expand 32-byte k", 16);
+        if (crng == &primary_crng)
+                _extract_entropy(&input_pool, &crng->state[4],
+                                 sizeof(__u32) * 12, 0);
+        else
+                get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+        for (i = 4; i < 16; i++) {
+                if (!arch_get_random_seed_long(&rv) &&
+                    !arch_get_random_long(&rv))
+                        rv = random_get_entropy();
+                crng->state[i] ^= rv;
+        }
+        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static int crng_fast_load(const char *cp, size_t len)
+{
+        unsigned long flags;
+        char *p;
+
+        if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+                return 0;
+        if (crng_ready()) {
+                spin_unlock_irqrestore(&primary_crng.lock, flags);
+                return 0;
+        }
+        p = (unsigned char *) &primary_crng.state[4];
+        while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
+                p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
+                cp++; crng_init_cnt++; len--;
+        }
+        if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+                crng_init = 1;
+                wake_up_interruptible(&crng_init_wait);
+                pr_notice("random: fast init done\n");
+        }
+        spin_unlock_irqrestore(&primary_crng.lock, flags);
+        return 1;
+}
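crng_fast_load() is the pre-seeding path: callers opportunistically XOR whatever early bytes they have (interrupt timings, hardware-RNG output) into the key words state[4..11], wrapping modulo CHACHA20_KEY_SIZE. Once CRNG_INIT_CNT_THRESH = 2 * CHACHA20_KEY_SIZE = 64 bytes have been folded in, crng_init moves to 1, so crng_ready() starts returning true even though the generator has not yet been reseeded from the input pool.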
+
+static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+{
+        unsigned long flags;
+        int i, num;
+        union {
+                __u8 block[CHACHA20_BLOCK_SIZE];
+                __u32 key[8];
+        } buf;
+
+        if (r) {
+                num = extract_entropy(r, &buf, 32, 16, 0);
+                if (num == 0)
+                        return;
+        } else {
+                _extract_crng(&primary_crng, buf.block);
+                _crng_backtrack_protect(&primary_crng, buf.block,
+                                        CHACHA20_KEY_SIZE);
+        }
+        spin_lock_irqsave(&primary_crng.lock, flags);
+        for (i = 0; i < 8; i++) {
+                unsigned long rv;
+                if (!arch_get_random_seed_long(&rv) &&
+                    !arch_get_random_long(&rv))
+                        rv = random_get_entropy();
+                crng->state[i+4] ^= buf.key[i] ^ rv;
+        }
+        memzero_explicit(&buf, sizeof(buf));
+        crng->init_time = jiffies;
+        if (crng == &primary_crng && crng_init < 2) {
+                crng_init = 2;
+                process_random_ready_list();
+                wake_up_interruptible(&crng_init_wait);
+                pr_notice("random: crng init done\n");
+        }
+        spin_unlock_irqrestore(&primary_crng.lock, flags);
+}
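crng_reseed() takes its new key material either from an entropy_store (the primary CRNG reseeding from input_pool, asking for 32 bytes with a 16-byte minimum) or, when r is NULL, from the primary CRNG itself, which is how the per-NUMA-node instances are refreshed without draining the pool. Each key word is additionally XORed with arch_get_random_seed_long()/arch_get_random_long() output where available, and the first pool-backed reseed of the primary CRNG is what advances crng_init to 2 and releases getrandom(2) waiters.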
+
+static inline void maybe_reseed_primary_crng(void)
+{
+        if (crng_init > 2 &&
+            time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL))
+                crng_reseed(&primary_crng, &input_pool);
+}
+
+static inline void crng_wait_ready(void)
+{
+        wait_event_interruptible(crng_init_wait, crng_ready());
+}
+
+static void _extract_crng(struct crng_state *crng,
+                          __u8 out[CHACHA20_BLOCK_SIZE])
+{
+        unsigned long v, flags;
+
+        if (crng_init > 1 &&
+            time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
+                crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+        spin_lock_irqsave(&crng->lock, flags);
+        if (arch_get_random_long(&v))
+                crng->state[14] ^= v;
+        chacha20_block(&crng->state[0], out);
+        if (crng->state[12] == 0)
+                crng->state[13]++;
+        spin_unlock_irqrestore(&crng->lock, flags);
+}
+
+static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+{
+        struct crng_state *crng = NULL;
+
+#ifdef CONFIG_NUMA
+        if (crng_node_pool)
+                crng = crng_node_pool[numa_node_id()];
+        if (crng == NULL)
+#endif
+                crng = &primary_crng;
+        _extract_crng(crng, out);
+}
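Every extraction goes through _extract_crng(), which transparently reseeds any instance older than CRNG_RESEED_INTERVAL (300*HZ, i.e. five minutes), folds arch entropy into nonce word state[14] when available, and treats state[12..13] as a 64-bit block counter (chacha20_block() bumps state[12]; the carry into state[13] is handled here). The NUMA wrapper simply picks the current node's instance so that unrelated nodes do not all serialize on one spinlock.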
+
+/*
+ * Use the leftover bytes from the CRNG block output (if there is
+ * enough) to mutate the CRNG key to provide backtracking protection.
+ */
+static void _crng_backtrack_protect(struct crng_state *crng,
+                                    __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+{
+        unsigned long flags;
+        __u32 *s, *d;
+        int i;
+
+        used = round_up(used, sizeof(__u32));
+        if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) {
+                extract_crng(tmp);
+                used = 0;
+        }
+        spin_lock_irqsave(&crng->lock, flags);
+        s = (__u32 *) &tmp[used];
+        d = &crng->state[4];
+        for (i = 0; i < 8; i++)
+                *d++ ^= *s++;
+        spin_unlock_irqrestore(&crng->lock, flags);
+}
+
+static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+{
+        struct crng_state *crng = NULL;
+
+#ifdef CONFIG_NUMA
+        if (crng_node_pool)
+                crng = crng_node_pool[numa_node_id()];
+        if (crng == NULL)
+#endif
+                crng = &primary_crng;
+        _crng_backtrack_protect(crng, tmp, used);
+}
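A quick worked example of the arithmetic above: if a caller consumed used = 13 bytes of the 64-byte block, that is rounded up to 16, and since 16 + CHACHA20_KEY_SIZE (32) <= CHACHA20_BLOCK_SIZE (64), the 32 bytes at tmp[16..47] are XORed into key words state[4..11], so the key that produced the earlier output no longer exists anywhere. If used = 40, then 40 + 32 > 64 and a fresh block is generated purely for rekeying. This is what "backtracking protection" means here: a later compromise of the state does not let an attacker reconstruct bytes that were already handed out.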
+
+static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+{
+        ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
+        __u8 tmp[CHACHA20_BLOCK_SIZE];
+        int large_request = (nbytes > 256);
+
+        while (nbytes) {
+                if (large_request && need_resched()) {
+                        if (signal_pending(current)) {
+                                if (ret == 0)
+                                        ret = -ERESTARTSYS;
+                                break;
+                        }
+                        schedule();
+                }
+
+                extract_crng(tmp);
+                i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
+                if (copy_to_user(buf, tmp, i)) {
+                        ret = -EFAULT;
+                        break;
+                }
+
+                nbytes -= i;
+                buf += i;
+                ret += i;
+        }
+        crng_backtrack_protect(tmp, i);
+
+        /* Wipe data just written to memory */
+        memzero_explicit(tmp, sizeof(tmp));
+
+        return ret;
+}
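extract_crng_user() is the new /dev/urandom backend: it streams whole ChaCha20 blocks to userspace, yields (and honors pending signals) between blocks for requests larger than 256 bytes, and afterwards uses whatever part of the last block was not copied out for backtracking protection before wiping the stack buffer.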
 
 /*********************************************************************
  *
  * Entropy input management
@@ -750,12 +985,12 @@ struct timer_rand_state {
 #define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
 
 /*
- * Add device- or boot-specific data to the input and nonblocking
- * pools to help initialize them to unique values.
+ * Add device- or boot-specific data to the input pool to help
+ * initialize it.
  *
- * None of this adds any entropy, it is meant to avoid the
- * problem of the nonblocking pool having similar initial state
- * across largely identical devices.
+ * None of this adds any entropy; it is meant to avoid the problem of
+ * the entropy pool having similar initial state across largely
+ * identical devices.
  */
 void add_device_randomness(const void *buf, unsigned int size)
 {
@@ -767,11 +1002,6 @@ void add_device_randomness(const void *buf, unsigned int size)
         _mix_pool_bytes(&input_pool, buf, size);
         _mix_pool_bytes(&input_pool, &time, sizeof(time));
         spin_unlock_irqrestore(&input_pool.lock, flags);
-
-        spin_lock_irqsave(&nonblocking_pool.lock, flags);
-        _mix_pool_bytes(&nonblocking_pool, buf, size);
-        _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time));
-        spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
 }
 EXPORT_SYMBOL(add_device_randomness);
@@ -802,7 +1032,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
         sample.jiffies = jiffies;
         sample.cycles = random_get_entropy();
         sample.num = num;
-        r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+        r = &input_pool;
         mix_pool_bytes(r, &sample, sizeof(sample));
 
         /*
@@ -918,11 +1148,21 @@ void add_interrupt_randomness(int irq, int irq_flags)
         fast_mix(fast_pool);
         add_interrupt_bench(cycles);
 
+        if (!crng_ready()) {
+                if ((fast_pool->count >= 64) &&
+                    crng_fast_load((char *) fast_pool->pool,
+                                   sizeof(fast_pool->pool))) {
+                        fast_pool->count = 0;
+                        fast_pool->last = now;
+                }
+                return;
+        }
+
         if ((fast_pool->count < 64) &&
             !time_after(now, fast_pool->last + HZ))
                 return;
 
-        r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+        r = &input_pool;
         if (!spin_trylock(&r->lock))
                 return;
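Until the CRNG reports ready, interrupt entropy is short-circuited into crng_fast_load(): once a CPU's fast_pool has accumulated 64 events, its pool words (sizeof(fast_pool->pool), 16 bytes) are XORed straight into the primary CRNG key and the fast pool is reset, instead of being credited to the input pool. Only after crng_init > 0 does the usual spill into input_pool (at 64 events or once per second) resume.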
@@ -946,6 +1186,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
         /* award one bit for the contents of the fast pool */
         credit_entropy_bits(r, credit + 1);
 }
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
 
 #ifdef CONFIG_BLOCK
 void add_disk_randomness(struct gendisk *disk)
@@ -965,9 +1206,6 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
  *
  *********************************************************************/
 
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
-                               size_t nbytes, int min, int rsvd);
-
 /*
  * This utility inline function is responsible for transferring entropy
  * from the primary pool to the secondary extraction pool. We make
@@ -1142,6 +1380,36 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
         memzero_explicit(&hash, sizeof(hash));
 }
 
+static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
+                                size_t nbytes, int fips)
+{
+        ssize_t ret = 0, i;
+        __u8 tmp[EXTRACT_SIZE];
+        unsigned long flags;
+
+        while (nbytes) {
+                extract_buf(r, tmp);
+
+                if (fips) {
+                        spin_lock_irqsave(&r->lock, flags);
+                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+                                panic("Hardware RNG duplicated output!\n");
+                        memcpy(r->last_data, tmp, EXTRACT_SIZE);
+                        spin_unlock_irqrestore(&r->lock, flags);
+                }
+                i = min_t(int, nbytes, EXTRACT_SIZE);
+                memcpy(buf, tmp, i);
+                nbytes -= i;
+                buf += i;
+                ret += i;
+        }
+
+        /* Wipe data just returned from memory */
+        memzero_explicit(tmp, sizeof(tmp));
+
+        return ret;
+}
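Splitting _extract_entropy() out of extract_entropy() lets crng_initialize() hash whatever happens to be in the input pool at early boot without going through account(), i.e. without claiming or debiting any entropy credit; the public extract_entropy() below is reduced to xfer + account + _extract_entropy().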
+
 /*
  * This function extracts randomness from the "entropy pool", and
  * returns it in a buffer.
@@ -1154,7 +1422,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                                size_t nbytes, int min, int reserved)
 {
-        ssize_t ret = 0, i;
         __u8 tmp[EXTRACT_SIZE];
         unsigned long flags;
 
@@ -1178,27 +1445,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
         xfer_secondary_pool(r, nbytes);
         nbytes = account(r, nbytes, min, reserved);
 
-        while (nbytes) {
-                extract_buf(r, tmp);
-                if (fips_enabled) {
-                        spin_lock_irqsave(&r->lock, flags);
-                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
-                                panic("Hardware RNG duplicated output!\n");
-                        memcpy(r->last_data, tmp, EXTRACT_SIZE);
-                        spin_unlock_irqrestore(&r->lock, flags);
-                }
-                i = min_t(int, nbytes, EXTRACT_SIZE);
-                memcpy(buf, tmp, i);
-                nbytes -= i;
-                buf += i;
-                ret += i;
-        }
-
-        /* Wipe data just returned from memory */
-        memzero_explicit(tmp, sizeof(tmp));
-
-        return ret;
+        return _extract_entropy(r, buf, nbytes, fips_enabled);
 }
 
 /*
@@ -1253,15 +1500,28 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  */
 void get_random_bytes(void *buf, int nbytes)
 {
+        __u8 tmp[CHACHA20_BLOCK_SIZE];
+
 #if DEBUG_RANDOM_BOOT > 0
-        if (unlikely(nonblocking_pool.initialized == 0))
+        if (!crng_ready())
                 printk(KERN_NOTICE "random: %pF get_random_bytes called "
-                       "with %d bits of entropy available\n",
-                       (void *) _RET_IP_,
-                       nonblocking_pool.entropy_total);
+                       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
 #endif
         trace_get_random_bytes(nbytes, _RET_IP_);
-        extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+
+        while (nbytes >= CHACHA20_BLOCK_SIZE) {
+                extract_crng(buf);
+                buf += CHACHA20_BLOCK_SIZE;
+                nbytes -= CHACHA20_BLOCK_SIZE;
+        }
+
+        if (nbytes > 0) {
+                extract_crng(tmp);
+                memcpy(buf, tmp, nbytes);
+                crng_backtrack_protect(tmp, nbytes);
+        } else
+                crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
+
+        memzero_explicit(tmp, sizeof(tmp));
 }
 EXPORT_SYMBOL(get_random_bytes);
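The in-kernel interface keeps its signature, so existing callers are untouched; a minimal, hypothetical caller still looks like the sketch below, the only difference being that the bytes now come from the ChaCha20 CRNG rather than the non-blocking pool.

        u8 session_key[32];     /* hypothetical buffer; any size works */

        get_random_bytes(session_key, sizeof(session_key));
        /* 32 < CHACHA20_BLOCK_SIZE, so one block is generated, 32 bytes are
         * copied out, and the remaining 32 bytes of that block rekey the CRNG. */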
@@ -1279,7 +1539,7 @@ int add_random_ready_callback(struct random_ready_callback *rdy)
         unsigned long flags;
         int err = -EALREADY;
 
-        if (likely(nonblocking_pool.initialized))
+        if (crng_ready())
                 return err;
 
         owner = rdy->owner;
@@ -1287,7 +1547,7 @@ int add_random_ready_callback(struct random_ready_callback *rdy)
                 return -ENOENT;
 
         spin_lock_irqsave(&random_ready_list_lock, flags);
-        if (nonblocking_pool.initialized)
+        if (crng_ready())
                 goto out;
 
         owner = NULL;
@@ -1351,7 +1611,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
         }
 
         if (nbytes)
-                extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+                get_random_bytes(p, nbytes);
 }
 EXPORT_SYMBOL(get_random_bytes_arch);
@@ -1394,9 +1654,31 @@ static void init_std_data(struct entropy_store *r)
  */
 static int rand_initialize(void)
 {
+#ifdef CONFIG_NUMA
+        int i;
+        int num_nodes = num_possible_nodes();
+        struct crng_state *crng;
+        struct crng_state **pool;
+#endif
+
         init_std_data(&input_pool);
         init_std_data(&blocking_pool);
-        init_std_data(&nonblocking_pool);
+        crng_initialize(&primary_crng);
+
+#ifdef CONFIG_NUMA
+        pool = kmalloc(num_nodes * sizeof(void *),
+                       GFP_KERNEL|__GFP_NOFAIL|__GFP_ZERO);
+        for (i = 0; i < num_nodes; i++) {
+                crng = kmalloc_node(sizeof(struct crng_state),
+                                    GFP_KERNEL | __GFP_NOFAIL, i);
+                spin_lock_init(&crng->lock);
+                crng_initialize(crng);
+                pool[i] = crng;
+        }
+        mb();
+        crng_node_pool = pool;
+#endif
         return 0;
 }
 early_initcall(rand_initialize);
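rand_initialize() builds the per-node array privately and publishes it last; the mb() appears to be there so the stores that initialize each crng_state cannot be reordered past the store to crng_node_pool, letting a reader in extract_crng() that sees a non-NULL pool also see initialized state behind it. The reader side has no explicit barrier and presumably relies on the address dependency through the published pointer; that is an observation about the code as shown, not something spelled out in the patch.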
@@ -1458,18 +1740,22 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 static ssize_t
 urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
+        unsigned long flags;
+        static int maxwarn = 10;
         int ret;
 
-        if (unlikely(nonblocking_pool.initialized == 0))
-                printk_once(KERN_NOTICE "random: %s urandom read "
-                            "with %d bits of entropy available\n",
-                            current->comm, nonblocking_pool.entropy_total);
-
+        if (!crng_ready() && maxwarn > 0) {
+                maxwarn--;
+                printk(KERN_NOTICE "random: %s: uninitialized urandom read "
+                       "(%zd bytes read)\n",
+                       current->comm, nbytes);
+                spin_lock_irqsave(&primary_crng.lock, flags);
+                crng_init_cnt = 0;
+                spin_unlock_irqrestore(&primary_crng.lock, flags);
+        }
         nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
-        ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
-
-        trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
-                           ENTROPY_BITS(&input_pool));
+        ret = extract_crng_user(buf, nbytes);
+        trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
         return ret;
 }
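Reads from an uninitialized /dev/urandom are now warned about up to ten times (rate-limited by the static maxwarn counter, naming the reading process and request size) rather than once, and each such read also resets crng_init_cnt to zero, making crng_fast_load() start its 64-byte count over -- presumably so that output already handed to userspace does not count toward the fast-init threshold.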
@@ -1515,10 +1801,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
 {
         size_t ret;
 
-        ret = write_pool(&blocking_pool, buffer, count);
-        if (ret)
-                return ret;
-        ret = write_pool(&nonblocking_pool, buffer, count);
+        ret = write_pool(&input_pool, buffer, count);
         if (ret)
                 return ret;
@@ -1543,8 +1826,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                         return -EPERM;
                 if (get_user(ent_count, p))
                         return -EFAULT;
-                credit_entropy_bits_safe(&input_pool, ent_count);
-                return 0;
+                return credit_entropy_bits_safe(&input_pool, ent_count);
         case RNDADDENTROPY:
                 if (!capable(CAP_SYS_ADMIN))
                         return -EPERM;
@@ -1558,8 +1840,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                                     size);
                 if (retval < 0)
                         return retval;
-                credit_entropy_bits_safe(&input_pool, ent_count);
-                return 0;
+                return credit_entropy_bits_safe(&input_pool, ent_count);
         case RNDZAPENTCNT:
         case RNDCLEARPOOL:
                 /*
@@ -1569,7 +1850,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                 if (!capable(CAP_SYS_ADMIN))
                         return -EPERM;
                 input_pool.entropy_count = 0;
-                nonblocking_pool.entropy_count = 0;
                 blocking_pool.entropy_count = 0;
                 return 0;
         default:
@@ -1611,11 +1891,10 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
         if (flags & GRND_RANDOM)
                 return _random_read(flags & GRND_NONBLOCK, buf, count);
 
-        if (unlikely(nonblocking_pool.initialized == 0)) {
+        if (!crng_ready()) {
                 if (flags & GRND_NONBLOCK)
                         return -EAGAIN;
-                wait_event_interruptible(urandom_init_wait,
-                                         nonblocking_pool.initialized);
+                crng_wait_ready();
                 if (signal_pending(current))
                         return -ERESTARTSYS;
         }
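From userspace the contract is unchanged -- getrandom(2) without GRND_RANDOM still blocks until the generator is initialized unless GRND_NONBLOCK is passed -- only the wait target moved from the non-blocking pool to crng_init_wait. A minimal, illustrative caller (assuming a libc that exposes the getrandom() wrapper; older systems would use syscall(SYS_getrandom, ...)):

        #include <errno.h>
        #include <unistd.h>
        #include <sys/random.h>         /* getrandom() wrapper, glibc >= 2.25 */

        /* Illustrative helper, not part of the patch. */
        static int get_seed(void *buf, size_t len)
        {
                ssize_t n = getrandom(buf, len, 0); /* may block until crng_init > 0 */

                if (n < 0)
                        return -errno;  /* EINTR if a signal arrived while blocking */
                return (size_t)n == len ? 0 : -EIO;
        }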
@@ -1773,13 +2052,15 @@ int random_int_secret_init(void)
         return 0;
 }
 
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
+                __aligned(sizeof(unsigned long));
+
 /*
  * Get a random word for internal kernel use only. Similar to urandom but
  * with the goal of minimal entropy pool depletion. As a result, the random
  * value is not cryptographically secure but for several uses the cost of
  * depleting entropy is too high
  */
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
 unsigned int get_random_int(void)
 {
         __u32 *hash;
@@ -1849,6 +2130,11 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 {
         struct entropy_store *poolp = &input_pool;
 
+        if (!crng_ready()) {
+                crng_fast_load(buffer, count);
+                return;
+        }
+
         /* Suspend writing if we're above the trickle threshold.
          * We'll be woken up again once below random_write_wakeup_thresh,
          * or when the calling thread is about to terminate.