@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 	return ret;
 }
 
-struct mmap_mfn_state {
+struct mmap_gfn_state {
 	unsigned long va;
 	struct vm_area_struct *vma;
 	domid_t domain;
 };
 
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
 {
 	struct privcmd_mmap_entry *msg = data;
-	struct mmap_mfn_state *st = state;
+	struct mmap_gfn_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	int rc;
 
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
 		return -EINVAL;
 
-	rc = xen_remap_domain_mfn_range(vma,
+	rc = xen_remap_domain_gfn_range(vma,
 					msg->va & PAGE_MASK,
 					msg->mfn, msg->npages,
 					vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	struct vm_area_struct *vma;
 	int rc;
 	LIST_HEAD(pagelist);
-	struct mmap_mfn_state state;
+	struct mmap_gfn_state state;
 
 	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 
 	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 			    &pagelist,
-			    mmap_mfn_range, &state);
+			    mmap_gfn_range, &state);
 
 
 out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
 	int global_error;
 	int version;
 
-	/* User-space mfn array to store errors in the second pass for V1. */
-	xen_pfn_t __user *user_mfn;
+	/* User-space gfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_gfn;
 	/* User-space int array to store errors in the second pass for V2. */
 	int __user *user_err;
 };
 
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
  */
 static int mmap_batch_fn(void *data, int nr, void *state)
 {
-	xen_pfn_t *mfnp = data;
+	xen_pfn_t *gfnp = data;
 	struct mmap_batch_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 		cur_pages = &pages[st->index];
 
 	BUG_ON(nr < 0);
-	ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
-					 (int *)mfnp, st->vma->vm_page_prot,
+	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+					 (int *)gfnp, st->vma->vm_page_prot,
 					 st->domain, cur_pages);
 
 	/* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
 
 	if (st->version == 1) {
 		if (err) {
-			xen_pfn_t mfn;
+			xen_pfn_t gfn;
 
-			ret = get_user(mfn, st->user_mfn);
+			ret = get_user(gfn, st->user_gfn);
 			if (ret < 0)
 				return ret;
 			/*
 			 * V1 encodes the error codes in the 32bit top
-			 * nibble of the mfn (with its known
+			 * nibble of the gfn (with its known
 			 * limitations vis-a-vis 64 bit callers).
 			 */
-			mfn |= (err == -ENOENT) ?
+			gfn |= (err == -ENOENT) ?
 				PRIVCMD_MMAPBATCH_PAGED_ERROR :
 				PRIVCMD_MMAPBATCH_MFN_ERROR;
-			return __put_user(mfn, st->user_mfn++);
+			return __put_user(gfn, st->user_gfn++);
 		} else
-			st->user_mfn++;
+			st->user_gfn++;
 	} else { /* st->version == 2 */
 		if (err)
 			return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
 	return 0;
 }
 
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	if (state.global_error) {
 		/* Write back errors in second pass. */
-		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_gfn = (xen_pfn_t *)m.arr;
 		state.user_err = m.err;
 		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
 					   &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else