This patch contains the IA64-specific Intel IOMMU code. It defines the new machvec dig_vtd, hooks for the IOMMU, DMAR table detection, a cache line flush function, etc. For a generic kernel with CONFIG_DMAR=y, if an Intel IOMMU is detected, dig_vtd is used as the machine vector. Otherwise, the kernel falls back to the dig machine vector. The kernel parameter "machvec=dig" or "intel_iommu=off" can be used to force the kernel to boot with the dig machine vector.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
commit 62fdd7678a
parent 6bb7a93548
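The boot-time behaviour the commit message describes can be condensed into a short sketch. This is an editor's illustration only, not part of the patch: it folds the work done by detect_intel_iommu(), pci_iommu_alloc() and pci_swiotlb_init() (see the hunks below) into one hypothetical function, and the name choose_machvec_sketch is made up here.

    /* Illustration only: how a CONFIG_IA64_GENERIC kernel ends up on the
     * dig_vtd or dig machine vector, per the pci-dma.c/pci-swiotlb.c
     * hunks later in this patch. */
    static void __init choose_machvec_sketch(void)
    {
            detect_intel_iommu();           /* looks for the ACPI DMAR table */

            if (iommu_detected)
                    return;                 /* keep dig_vtd; VT-d handles DMA mapping */

            /* No VT-d found (or intel_iommu=off): fall back to the plain
             * dig machine vector and route DMA through swiotlb instead. */
            machvec_init("dig");
            swiotlb_init();
            dma_ops = &swiotlb_dma_ops;
    }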
@@ -0,0 +1,59 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/intel-iommu.h>

void *
vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flags)
{
        return intel_alloc_coherent(dev, size, dma_handle, flags);
}
EXPORT_SYMBOL_GPL(vtd_alloc_coherent);

void
vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
                  dma_addr_t dma_handle)
{
        intel_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL_GPL(vtd_free_coherent);

dma_addr_t
vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
                     int dir, struct dma_attrs *attrs)
{
        return intel_map_single(dev, (phys_addr_t)addr, size, dir);
}
EXPORT_SYMBOL_GPL(vtd_map_single_attrs);

void
vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
                       int dir, struct dma_attrs *attrs)
{
        intel_unmap_single(dev, iova, size, dir);
}
EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);

int
vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
                 int dir, struct dma_attrs *attrs)
{
        return intel_map_sg(dev, sglist, nents, dir);
}
EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);

void
vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
                   int nents, int dir, struct dma_attrs *attrs)
{
        intel_unmap_sg(dev, sglist, nents, dir);
}
EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);

int
vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}
EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
@@ -0,0 +1,3 @@
#define MACHVEC_PLATFORM_NAME           dig_vtd
#define MACHVEC_PLATFORM_HEADER         <asm/machvec_dig_vtd.h>
#include <asm/machvec_init.h>
@@ -0,0 +1,16 @@
#ifndef _ASM_IA64_IOMMU_H
#define _ASM_IA64_IOMMU_H 1

#define cpu_has_x2apic 0
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)

extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
extern int forbid_dac;

#endif
@@ -0,0 +1,38 @@
#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
#define _ASM_IA64_MACHVEC_DIG_VTD_h

extern ia64_mv_setup_t                  dig_setup;
extern ia64_mv_dma_alloc_coherent       vtd_alloc_coherent;
extern ia64_mv_dma_free_coherent        vtd_free_coherent;
extern ia64_mv_dma_map_single_attrs     vtd_map_single_attrs;
extern ia64_mv_dma_unmap_single_attrs   vtd_unmap_single_attrs;
extern ia64_mv_dma_map_sg_attrs         vtd_map_sg_attrs;
extern ia64_mv_dma_unmap_sg_attrs       vtd_unmap_sg_attrs;
extern ia64_mv_dma_supported            iommu_dma_supported;
extern ia64_mv_dma_mapping_error        vtd_dma_mapping_error;
extern ia64_mv_dma_init                 pci_iommu_alloc;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define platform_name                           "dig_vtd"
#define platform_setup                          dig_setup
#define platform_dma_init                       pci_iommu_alloc
#define platform_dma_alloc_coherent             vtd_alloc_coherent
#define platform_dma_free_coherent              vtd_free_coherent
#define platform_dma_map_single_attrs           vtd_map_single_attrs
#define platform_dma_unmap_single_attrs         vtd_unmap_single_attrs
#define platform_dma_map_sg_attrs               vtd_map_sg_attrs
#define platform_dma_unmap_sg_attrs             vtd_unmap_sg_attrs
#define platform_dma_sync_single_for_cpu        machvec_dma_sync_single
#define platform_dma_sync_sg_for_cpu            machvec_dma_sync_sg
#define platform_dma_sync_single_for_device     machvec_dma_sync_single
#define platform_dma_sync_sg_for_device         machvec_dma_sync_sg
#define platform_dma_supported                  iommu_dma_supported
#define platform_dma_mapping_error              vtd_dma_mapping_error

#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
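The "dual use" comment above is easiest to see from the caller's side. The sketch below is an editor's illustration, not part of the patch; example_alloc() is a hypothetical helper, while dma_alloc_coherent(), platform_dma_alloc_coherent and the config symbols are the names this machvec scheme actually uses.

    #include <linux/dma-mapping.h>

    /* Illustration only (not part of the patch): a driver-side allocation.
     * On a dig_vtd-only kernel the platform_dma_alloc_coherent macro above
     * is used directly, so this call resolves to vtd_alloc_coherent(); on a
     * CONFIG_IA64_GENERIC kernel the same macro instead seeds the dig_vtd
     * machine-vector entry that is installed when dig_vtd is selected at
     * boot, and the call dispatches through that table. */
    static void *example_alloc(struct device *dev, dma_addr_t *handle)
    {
            return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
    }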
@@ -0,0 +1,56 @@
#ifndef ASM_IA64__SWIOTLB_H
#define ASM_IA64__SWIOTLB_H

#include <linux/dma-mapping.h>

/* SWIOTLB interface */

extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
                                     size_t size, int dir);
extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flags);
extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                                 size_t size, int dir);
extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
                                        dma_addr_t dev_addr,
                                        size_t size, int dir);
extern void swiotlb_sync_single_for_device(struct device *hwdev,
                                           dma_addr_t dev_addr,
                                           size_t size, int dir);
extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
                                              dma_addr_t dev_addr,
                                              unsigned long offset,
                                              size_t size, int dir);
extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
                                                 dma_addr_t dev_addr,
                                                 unsigned long offset,
                                                 size_t size, int dir);
extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
                                    struct scatterlist *sg, int nelems,
                                    int dir);
extern void swiotlb_sync_sg_for_device(struct device *hwdev,
                                       struct scatterlist *sg, int nelems,
                                       int dir);
extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
                          int nents, int direction);
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                             int nents, int direction);
extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
                                  void *vaddr, dma_addr_t dma_handle);
extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern void swiotlb_init(void);

extern int swiotlb_force;

#ifdef CONFIG_SWIOTLB
extern int swiotlb;
extern void pci_swiotlb_init(void);
#else
#define swiotlb 0
static inline void pci_swiotlb_init(void)
{
}
#endif

#endif /* ASM_IA64__SWIOTLB_H */
@@ -0,0 +1,129 @@
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <asm/iommu.h>
#include <asm/machvec.h>
#include <linux/dma-mapping.h>

#include <asm/machvec.h>
#include <asm/system.h>

#ifdef CONFIG_DMAR

#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/page.h>
#include <asm/iommu.h>

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

static int iommu_sac_force __read_mostly;

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly;

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};

void __init pci_iommu_alloc(void)
{
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}

static int __init pci_iommu_init(void)
{
        if (iommu_detected)
                intel_iommu_init();

        return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

void pci_iommu_shutdown(void)
{
        return;
}

void __init
iommu_dma_init(void)
{
        return;
}

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

int iommu_dma_supported(struct device *dev, u64 mask)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported_op)
                return ops->dma_supported_op(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                dev_info(dev, "Force SAC with mask %lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(iommu_dma_supported);

#endif
@@ -0,0 +1,46 @@
/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/machvec.h>

int swiotlb __read_mostly;
EXPORT_SYMBOL(swiotlb);

struct dma_mapping_ops swiotlb_dma_ops = {
        .mapping_error = swiotlb_dma_mapping_error,
        .alloc_coherent = swiotlb_alloc_coherent,
        .free_coherent = swiotlb_free_coherent,
        .map_single = swiotlb_map_single,
        .unmap_single = swiotlb_unmap_single,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
        .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .map_sg = swiotlb_map_sg,
        .unmap_sg = swiotlb_unmap_sg,
        .dma_supported_op = swiotlb_dma_supported,
};

void __init pci_swiotlb_init(void)
{
        if (!iommu_detected) {
#ifdef CONFIG_IA64_GENERIC
                swiotlb = 1;
                printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
                machvec_init("dig");
                swiotlb_init();
                dma_ops = &swiotlb_dma_ops;
#else
                panic("Unable to find Intel IOMMU");
#endif
        }
}