@@ -38,6 +38,7 @@
# define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
# include <linux/cpu.h>
# include <linux/kernel.h>
# include <linux/sched.h>
# include <linux/errno.h>
@@ -52,6 +53,7 @@
# include <linux/notifier.h>
# include <linux/memory.h>
# include <linux/memory_hotplug.h>
# include <linux/percpu-defs.h>
# include <asm/page.h>
# include <asm/pgalloc.h>
@@ -90,6 +92,8 @@ EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list [ PAGE_SIZE / sizeof ( unsigned long ) ] ;
static DEFINE_PER_CPU ( struct page * , balloon_scratch_page ) ;
/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD ( ballooned_pages ) ;
@@ -412,7 +416,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
if ( xen_pv_domain ( ) & & ! PageHighMem ( page ) ) {
ret = HYPERVISOR_update_va_mapping (
( unsigned long ) __va ( pfn < < PAGE_SHIFT ) ,
__pte_ma ( 0 ) , 0 ) ;
pfn_pte ( page_to_pfn ( __get_cpu_var ( balloon_scratch_page ) ) ,
PAGE_KERNEL_RO ) , 0 ) ;
BUG_ON ( ret ) ;
}
# endif
@@ -425,7 +430,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
/* No more mappings: invalidate P2M and add to balloon. */
for ( i = 0 ; i < nr_pages ; i + + ) {
pfn = mfn_to_pfn ( frame_list [ i ] ) ;
__set_phys_to_machine ( pfn , INVALID_P2M_ENTRY ) ;
__set_phys_to_machine ( pfn ,
pfn_to_mfn ( page_to_pfn ( __get_cpu_var ( balloon_scratch_page ) ) ) ) ;
balloon_append ( pfn_to_page ( pfn ) ) ;
}
@@ -480,6 +486,18 @@ static void balloon_process(struct work_struct *work)
mutex_unlock ( & balloon_mutex ) ;
}
/*
 * Return this CPU's balloon scratch page.
 *
 * get_cpu_var() disables preemption, so the caller holds the CPU (and
 * the page) until it calls put_balloon_scratch_page().  Scratch pages
 * are allocated at init / CPU-hotplug time, so one must always exist.
 */
struct page *get_balloon_scratch_page(void)
{
	struct page *page = get_cpu_var(balloon_scratch_page);

	BUG_ON(page == NULL);

	return page;
}
/*
 * Release the scratch page taken with get_balloon_scratch_page() and
 * re-enable preemption.
 */
void put_balloon_scratch_page(void)
{
	put_cpu_var(balloon_scratch_page);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target ( unsigned long target )
{
@@ -573,13 +591,47 @@ static void __init balloon_add_region(unsigned long start_pfn,
}
}
/*
 * CPU hotplug callback: make sure a scratch page exists for a CPU that
 * is about to come online.  A page left over from a previous online
 * period (the != NULL check) is reused rather than reallocated.
 *
 * Returns NOTIFY_BAD (aborting the CPU bring-up) if the allocation
 * fails, NOTIFY_OK otherwise.
 */
static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct page *page;

	if (action != CPU_UP_PREPARE)
		return NOTIFY_OK;

	/* Scratch page survives offlining; reuse it if already there. */
	if (per_cpu(balloon_scratch_page, cpu) != NULL)
		return NOTIFY_OK;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL) {
		pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
		return NOTIFY_BAD;
	}
	per_cpu(balloon_scratch_page, cpu) = page;

	return NOTIFY_OK;
}
/* Registered from balloon_init() to track CPU online events. */
static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
	.notifier_call	= balloon_cpu_notify,
};
static int __init balloon_init ( void )
{
int i ;
int i , cpu ;
if ( ! xen_domain ( ) )
return - ENODEV ;
for_each_online_cpu ( cpu )
{
per_cpu ( balloon_scratch_page , cpu ) = alloc_page ( GFP_KERNEL ) ;
if ( per_cpu ( balloon_scratch_page , cpu ) = = NULL ) {
pr_warn ( " Failed to allocate balloon_scratch_page for cpu %d \n " , cpu ) ;
return - ENOMEM ;
}
}
register_cpu_notifier ( & balloon_cpu_notifier ) ;
pr_info ( " Initialising balloon driver \n " ) ;
balloon_stats . current_pages = xen_pv_domain ( )
@@ -616,4 +668,15 @@ static int __init balloon_init(void)
subsys_initcall ( balloon_init ) ;
/*
 * Reset every possible CPU's scratch-page pointer very early in boot
 * (early_initcall), so that balloon_init() and the hotplug notifier
 * can rely on NULL meaning "not yet allocated" for that CPU.
 */
static int __init balloon_clear(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(balloon_scratch_page, cpu) = NULL;

	return 0;
}
early_initcall(balloon_clear);
MODULE_LICENSE ( " GPL " ) ;