@@ -739,45 +739,159 @@ EXPORT_SYMBOL(__page_cache_alloc);
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-wait_queue_head_t *page_waitqueue(struct page *page)
+#define PAGE_WAIT_TABLE_BITS 8
+#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
+static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
+
+static wait_queue_head_t *page_waitqueue(struct page *page)
 {
-        return bit_waitqueue(page, 0);
+        return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
 }
-EXPORT_SYMBOL(page_waitqueue);
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+void __init pagecache_init(void)
 {
-        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+        int i;
 
-        if (test_bit(bit_nr, &page->flags))
-                __wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
-                                                        TASK_UNINTERRUPTIBLE);
+        for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
+                init_waitqueue_head(&page_wait_table[i]);
+
+        page_writeback_init();
 }
-EXPORT_SYMBOL(wait_on_page_bit);
 
-int wait_on_page_bit_killable(struct page *page, int bit_nr)
+struct wait_page_key {
+        struct page *page;
+        int bit_nr;
+        int page_match;
+};
+
+struct wait_page_queue {
+        struct page *page;
+        int bit_nr;
+        wait_queue_t wait;
+};
+
+static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
 {
-        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+        struct wait_page_key *key = arg;
+        struct wait_page_queue *wait_page
+                = container_of(wait, struct wait_page_queue, wait);
+
+        if (wait_page->page != key->page)
+                return 0;
+        key->page_match = 1;
 
-        if (!test_bit(bit_nr, &page->flags))
+        if (wait_page->bit_nr != key->bit_nr)
+                return 0;
+        if (test_bit(key->bit_nr, &key->page->flags))
                 return 0;
 
-        return __wait_on_bit(page_waitqueue(page), &wait,
-                             bit_wait_io, TASK_KILLABLE);
+        return autoremove_wake_function(wait, mode, sync, key);
 }
 
-int wait_on_page_bit_killable_timeout(struct page *page,
-                                       int bit_nr, unsigned long timeout)
+void wake_up_page_bit(struct page *page, int bit_nr)
 {
-        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+        wait_queue_head_t *q = page_waitqueue(page);
+        struct wait_page_key key;
+        unsigned long flags;
 
-        wait.key.timeout = jiffies + timeout;
-        if (!test_bit(bit_nr, &page->flags))
-                return 0;
-        return __wait_on_bit(page_waitqueue(page), &wait,
-                             bit_wait_io_timeout, TASK_KILLABLE);
+        key.page = page;
+        key.bit_nr = bit_nr;
+        key.page_match = 0;
+
+        spin_lock_irqsave(&q->lock, flags);
+        __wake_up_locked_key(q, TASK_NORMAL, &key);
+        /*
+         * It is possible for other pages to have collided on the waitqueue
+         * hash, so in that case check for a page match. That prevents a
+         * long-term waiter for this page from losing the Waiters bit while
+         * it is still queued.
+         *
+         * It is still possible to miss a case here, when we woke page waiters
+         * and removed them from the waitqueue, but there are still other
+         * page waiters.
+         */
+        if (!waitqueue_active(q) || !key.page_match) {
+                ClearPageWaiters(page);
+                /*
+                 * It's possible to miss clearing Waiters here, when we woke
+                 * our page waiters, but the hashed waitqueue has waiters for
+                 * other pages on it.
+                 *
+                 * That's okay, it's a rare case. The next waker will clear it.
+                 */
+        }
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(wake_up_page_bit);
+
+static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+                struct page *page, int bit_nr, int state, bool lock)
+{
+        struct wait_page_queue wait_page;
+        wait_queue_t *wait = &wait_page.wait;
+        int ret = 0;
+
+        init_wait(wait);
+        wait->func = wake_page_function;
+        wait_page.page = page;
+        wait_page.bit_nr = bit_nr;
+
+        for (;;) {
+                spin_lock_irq(&q->lock);
+
+                if (likely(list_empty(&wait->task_list))) {
+                        if (lock)
+                                __add_wait_queue_tail_exclusive(q, wait);
+                        else
+                                __add_wait_queue(q, wait);
+                        SetPageWaiters(page);
+                }
+
+                set_current_state(state);
+
+                spin_unlock_irq(&q->lock);
+
+                if (likely(test_bit(bit_nr, &page->flags))) {
+                        io_schedule();
+                        if (unlikely(signal_pending_state(state, current))) {
+                                ret = -EINTR;
+                                break;
+                        }
+                }
+
+                if (lock) {
+                        if (!test_and_set_bit_lock(bit_nr, &page->flags))
+                                break;
+                } else {
+                        if (!test_bit(bit_nr, &page->flags))
+                                break;
+                }
+        }
+
+        finish_wait(q, wait);
+
+        /*
+         * A signal could leave PageWaiters set. Clearing it here if
+         * !waitqueue_active would be possible (by open-coding finish_wait),
+         * but still fail to catch it in the case of wait hash collision. We
+         * already can fail to clear wait hash collision cases, so don't
+         * bother with signals either.
+         */
+
+        return ret;
+}
+
+void wait_on_page_bit(struct page *page, int bit_nr)
+{
+        wait_queue_head_t *q = page_waitqueue(page);
+        wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
+}
+EXPORT_SYMBOL(wait_on_page_bit);
+
+int wait_on_page_bit_killable(struct page *page, int bit_nr)
+{
+        wait_queue_head_t *q = page_waitqueue(page);
+        return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
 }
-EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);
 
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
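
Worth pausing on the bucket math in the hunk above: every struct page in the system now shares one static table of 1 << 8 = 256 waitqueue heads, so two unrelated pages can hash to the same head, which is exactly why wake_page_function() filters on page and bit_nr before waking anyone. A minimal userspace sketch of the lookup follows; the multiplicative hash is a stand-in for the kernel's hash_ptr(), and all names here (hash_ptr_sketch, waitqueue_for) are mine.

#include <stdio.h>
#include <stdint.h>

#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)

struct waitqueue { int nr_waiters; };   /* placeholder for wait_queue_head_t */
static struct waitqueue wait_table[PAGE_WAIT_TABLE_SIZE];

/* Multiply by a 64-bit golden-ratio constant and keep the top `bits`
 * bits of the product as the bucket index. */
static unsigned int hash_ptr_sketch(const void *p, unsigned int bits)
{
        uint64_t h = (uint64_t)(uintptr_t)p * 0x61c8864680b583ebULL;
        return (unsigned int)(h >> (64 - bits));
}

static struct waitqueue *waitqueue_for(const void *page)
{
        return &wait_table[hash_ptr_sketch(page, PAGE_WAIT_TABLE_BITS)];
}

int main(void)
{
        int a, b;       /* stand-ins for two distinct struct pages */
        printf("a -> bucket %u\n", hash_ptr_sketch(&a, PAGE_WAIT_TABLE_BITS));
        printf("b -> bucket %u\n", hash_ptr_sketch(&b, PAGE_WAIT_TABLE_BITS));
        /* With 256 buckets, waitqueue_for(&a) == waitqueue_for(&b) is
         * possible for unrelated pages; wake_page_function() handles that
         * collision by re-checking page and bit_nr. */
        return 0;
}

This makes the trade-off named in the comment at the top of the hunk concrete: a shared bucket means spurious wakeups on collision, in exchange for not paying per-page waitqueue storage.
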
@@ -793,6 +907,7 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 
         spin_lock_irqsave(&q->lock, flags);
         __add_wait_queue(q, waiter);
+        SetPageWaiters(page);
         spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_page_wait_queue);
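
The single added line is the point of this hunk: wakers in this series test PG_waiters before touching the hashed queue at all, so any path that queues a waiter, including this arbitrary-waiter API, must advertise it first or the wakeup is skipped. A compressed sketch of that flag protocol, using C11 atomics in place of the kernel's bitops; the names are mine and the real unlock path uses its own primitives and barriers.

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

#define PG_LOCKED_FLAG  (1u << 0)
#define PG_WAITERS_FLAG (1u << 1)

static _Atomic unsigned page_flags = PG_LOCKED_FLAG;

/* Waiter side: advertise before sleeping; the patch does this under
 * q->lock, before re-testing the page bit. */
static void advertise_waiter(void)
{
        atomic_fetch_or(&page_flags, PG_WAITERS_FLAG);
}

/* Waker side: drop the lock bit, then bother with the shared hashed
 * queue only if somebody advertised. Without the SetPageWaiters()
 * added above, this returns false and a queued waiter sleeps forever. */
static bool unlock_needs_wakeup(void)
{
        unsigned old = atomic_fetch_and(&page_flags, ~PG_LOCKED_FLAG);
        return old & PG_WAITERS_FLAG;   /* true -> wake_up_page_bit() */
}

int main(void)
{
        advertise_waiter();
        printf("wakeup needed: %d\n", unlock_needs_wakeup());  /* prints 1 */
        return 0;
}
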
@@ -874,23 +989,19 @@ EXPORT_SYMBOL_GPL(page_endio);
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
  */
-void __lock_page(struct page *page)
+void __lock_page(struct page *__page)
 {
-        struct page *page_head = compound_head(page);
-        DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-        __wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
-                                                        TASK_UNINTERRUPTIBLE);
+        struct page *page = compound_head(__page);
+        wait_queue_head_t *q = page_waitqueue(page);
+        wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
 }
 EXPORT_SYMBOL(__lock_page);
 
-int __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *__page)
 {
-        struct page *page_head = compound_head(page);
-        DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-        return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
-                                        bit_wait_io, TASK_KILLABLE);
+        struct page *page = compound_head(__page);
+        wait_queue_head_t *q = page_waitqueue(page);
+        return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
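
The lock = true flavour used by both functions above queues the waiter exclusively (__add_wait_queue_tail_exclusive) and retries test_and_set_bit_lock() after every wakeup, so one unlock hands PG_locked to at most one sleeper, and a late arrival can still steal the bit before the woken task retries. A pthreads sketch of that loop, with a condition variable standing in for the hashed waitqueue and all names mine:

#include <stdio.h>
#include <pthread.h>
#include <stdatomic.h>

static _Atomic unsigned flags;  /* bit 0 plays the role of PG_locked */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;

static void lock_page_sketch(void)
{
        for (;;) {
                /* like test_and_set_bit_lock(PG_locked, &page->flags) */
                if (!(atomic_fetch_or(&flags, 1u) & 1u))
                        return;                 /* we own the "page lock" */
                pthread_mutex_lock(&qlock);
                while (atomic_load(&flags) & 1u)
                        pthread_cond_wait(&waitq, &qlock);  /* ~io_schedule() */
                pthread_mutex_unlock(&qlock);
                /* Loop and retry: another thread may take the bit first,
                 * matching the kernel's non-handoff behaviour here. */
        }
}

static void unlock_page_sketch(void)
{
        atomic_fetch_and(&flags, ~1u);          /* clear the lock bit */
        pthread_mutex_lock(&qlock);
        pthread_cond_signal(&waitq);            /* exclusive-style: wake one */
        pthread_mutex_unlock(&qlock);
}

static void *contender(void *arg)
{
        (void)arg;
        lock_page_sketch();
        puts("contender got the lock");
        unlock_page_sketch();
        return NULL;
}

int main(void)
{
        pthread_t t;
        lock_page_sketch();                     /* main takes the lock first */
        pthread_create(&t, NULL, contender, NULL);
        unlock_page_sketch();                   /* hands it to the contender */
        pthread_join(&t, NULL);
        return 0;
}
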