IOMMU Updates for Linux v3.10

The updates are mostly about the x86 IOMMUs this time. Exceptions are
the groundwork for the PAMU IOMMU from Freescale (for a PPC platform)
and an extension to the IOMMU group interface.

On the x86 side this includes a workaround for VT-d to disable
interrupt remapping on broken chipsets. On the AMD-Vi side the most
important new feature is a kernel command-line interface to override
broken information in IVRS ACPI tables and get interrupt remapping
working this way.

Besides that there are small fixes all over the place.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABAgAGBQJRh2vAAAoJECvwRC2XARrjbjkP/jzKzeffybUpQsIJF8rs/IEt
hSwqpGLr6WR5FdneEH9fiBIp4pyMDXmuAb/2ZNgB+DgPN3xgqmWVo4WLk7pMo3BS
/xIz/lu7hIX3AtKt807pL9+rPdhGYEJ43Vmr4bW9x0l1kuNXy6fmMLcN5FaPKjV4
p4hY4jOstEgtYQw4wi39/9b4FsYoipZizkOUSdtCzWwTv7jOHH7/Wra8iZyzL6Je
1VlF/efp0ytTcwLdHOfGwPCIlZrQRtQCM4SqdAUG9bOL3ARR9Yu/0iW1295nbLzo
CQX5CfKePvo/fGxki1jcBi+UCyxYKPosB5kCxmh4MAxCg/VzzMsaME/A73tLJa6W
Y29bbjwPoBPMq03HX8S9R5QWY8HpFujUUp+J4TXcKuTgYEV28WfLu1uaeKD716nM
LoXUojov7Cj8ZQZnhyu5l+XNaephBZLfw/8bM6bAxhlKXwAjmLiS5Z+srPl1GJee
5GCV+L94JifHLZaREWh3JFsh9O3W7Wno2++c4JU32uCWJHXH7tMgs2P8n5AY9rnT
Km1a9y6w2MF3Gg9j4y6u75m0XnFTNzYjeJMUtqVlwVhNHhgaXfuIWY63xOQCLJs1
ThTHOjoh0VqONGobR/ywn+0ouo9X07DnWpluyFd+zY3XK0UE0NOu9XMr4i6TWxOf
mlzWoEKxtw36XGHB/FtQ
=MVc/
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "The updates are mostly about the x86 IOMMUs this time. Exceptions are
  the groundwork for the PAMU IOMMU from Freescale (for a PPC platform)
  and an extension to the IOMMU group interface.

  On the x86 side this includes a workaround for VT-d to disable
  interrupt remapping on broken chipsets. On the AMD-Vi side the most
  important new feature is a kernel command-line interface to override
  broken information in IVRS ACPI tables and get interrupt remapping
  working this way.

  Besides that there are small fixes all over the place."

* tag 'iommu-updates-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (24 commits)
  iommu/tegra: Fix printk formats for dma_addr_t
  iommu: Add a function to find an iommu group by id
  iommu/vt-d: Remove warning for HPET scope type
  iommu: Move swap_pci_ref function to drivers/iommu/pci.h.
  iommu/vt-d: Disable translation if already enabled
  iommu/amd: fix error return code in early_amd_iommu_init()
  iommu/AMD: Per-thread IOMMU Interrupt Handling
  iommu: Include linux/err.h
  iommu/amd: Workaround for ERBT1312
  iommu/amd: Document ivrs_ioapic and ivrs_hpet parameters
  iommu/amd: Don't report firmware bugs with cmd-line ivrs overrides
  iommu/amd: Add ioapic and hpet ivrs override
  iommu/amd: Add early maps for ioapic and hpet
  iommu/amd: Extend IVRS special device data structure
  iommu/amd: Move add_special_device() to __init
  iommu: Fix compile warnings with forward declarations
  iommu/amd: Properly initialize irq-table lock
  iommu/amd: Use AMD specific data structure for irq remapping
  iommu/amd: Remove map_sg_no_iommu()
  iommu/vt-d: add quirk for broken interrupt remapping on 55XX chipsets
  ...
commit 99737982ca

20 changed files with 380 additions and 131 deletions
@@ -1277,6 +1277,20 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

 	iucv=		[HW,NET]

+	ivrs_ioapic	[HW,X86_64]
+			Provide an override to the IOAPIC-ID<->DEVICE-ID
+			mapping provided in the IVRS ACPI table. For
+			example, to map IOAPIC-ID decimal 10 to
+			PCI device 00:14.0 write the parameter as:
+				ivrs_ioapic[10]=00:14.0
+
+	ivrs_hpet	[HW,X86_64]
+			Provide an override to the HPET-ID<->DEVICE-ID
+			mapping provided in the IVRS ACPI table. For
+			example, to map HPET-ID decimal 0 to
+			PCI device 00:14.0 write the parameter as:
+				ivrs_hpet[0]=00:14.0
+
 	js=		[HW,JOY] Analog joystick
 			See Documentation/input/joystick.txt.

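As an illustration of the documented syntax (example values only, not part of this patch), both overrides can be combined on one kernel command line; the IOAPIC id 10, HPET id 0 and device address 00:14.0 are the documentation's placeholders and must match the actual southbridge devices of the machine at hand:

    vmlinuz ... ivrs_ioapic[10]=00:14.0 ivrs_hpet[0]=00:14.0
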
@@ -24,10 +24,18 @@

 #include <asm/io_apic.h>

+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_chip;
+struct msi_msg;
+struct pci_dev;
+struct irq_cfg;
+
 #ifdef CONFIG_IRQ_REMAP

 extern void setup_irq_remapping_ops(void);
 extern int irq_remapping_supported(void);
+extern void set_irq_remapping_broken(void);
 extern int irq_remapping_prepare(void);
 extern int irq_remapping_enable(void);
 extern void irq_remapping_disable(void);
@@ -54,6 +62,7 @@ void irq_remap_modify_chip_defaults(struct irq_chip *chip);

 static inline void setup_irq_remapping_ops(void) { }
 static inline int irq_remapping_supported(void) { return 0; }
+static inline void set_irq_remapping_broken(void) { }
 static inline int irq_remapping_prepare(void) { return -ENODEV; }
 static inline int irq_remapping_enable(void) { return -ENODEV; }
 static inline void irq_remapping_disable(void) { }

@@ -18,6 +18,7 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/irq_remapping.h>

 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -192,6 +193,21 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif

+static void __init intel_remapping_check(int num, int slot, int func)
+{
+    u8 revision;
+
+    revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
+
+    /*
+     * Revision 0x13 of this chipset supports irq remapping
+     * but has an erratum that breaks its behavior, flag it as such
+     */
+    if (revision == 0x13)
+        set_irq_remapping_broken();
+
+}
+
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -221,6 +237,10 @@ static struct chipset early_qrk[] __initdata = {
       PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
     { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
       PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
+    { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
+      PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+    { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
+      PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
     {}
 };

@@ -46,6 +46,7 @@
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
+#include "pci.h"

 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

@@ -263,12 +264,6 @@ static bool check_device(struct device *dev)
     return true;
 }

-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
-    pci_dev_put(*from);
-    *from = to;
-}
-
 static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 {
     while (!bus->self) {
@@ -701,9 +696,6 @@ retry:
 static void iommu_poll_events(struct amd_iommu *iommu)
 {
     u32 head, tail;
-    unsigned long flags;
-
-    spin_lock_irqsave(&iommu->lock, flags);

     head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
     tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -714,8 +706,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
     }

     writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
-    spin_unlock_irqrestore(&iommu->lock, flags);
 }

 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
@@ -740,17 +730,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)

 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 {
-    unsigned long flags;
     u32 head, tail;

     if (iommu->ppr_log == NULL)
         return;

-    /* enable ppr interrupts again */
-    writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-    spin_lock_irqsave(&iommu->lock, flags);
-
     head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
     tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

@@ -786,34 +770,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
         head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
         writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

-        /*
-         * Release iommu->lock because ppr-handling might need to
-         * re-acquire it
-         */
-        spin_unlock_irqrestore(&iommu->lock, flags);
-
         /* Handle PPR entry */
         iommu_handle_ppr_entry(iommu, entry);

-        spin_lock_irqsave(&iommu->lock, flags);
-
         /* Refresh ring-buffer information */
         head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
         tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
     }

-    spin_unlock_irqrestore(&iommu->lock, flags);
 }

 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
-    struct amd_iommu *iommu;
+    struct amd_iommu *iommu = (struct amd_iommu *) data;
+    u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

-    for_each_iommu(iommu) {
+    while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+        /* Enable EVT and PPR interrupts again */
+        writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+            iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+        if (status & MMIO_STATUS_EVT_INT_MASK) {
+            pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
             iommu_poll_events(iommu);
+        }
+
+        if (status & MMIO_STATUS_PPR_INT_MASK) {
+            pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
             iommu_poll_ppr_log(iommu);
         }

+        /*
+         * Hardware bug: ERBT1312
+         * When re-enabling interrupt (by writing 1
+         * to clear the bit), the hardware might also try to set
+         * the interrupt bit in the event status register.
+         * In this scenario, the bit will be set, and disable
+         * subsequent interrupts.
+         *
+         * Workaround: The IOMMU driver should read back the
+         * status register and check if the interrupt bits are cleared.
+         * If not, driver will need to go through the interrupt handler
+         * again and re-clear the bits
+         */
+        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+    }
     return IRQ_HANDLED;
 }

@@ -2838,24 +2838,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
     spin_unlock_irqrestore(&domain->lock, flags);
 }

-/*
- * This is a special map_sg function which is used if we should map a
- * device which is not handled by an AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
-                           int nelems, int dir)
-{
-    struct scatterlist *s;
-    int i;
-
-    for_each_sg(sglist, s, nelems, i) {
-        s->dma_address = (dma_addr_t)sg_phys(s);
-        s->dma_length  = s->length;
-    }
-
-    return nelems;
-}
-
 /*
  * The exported map_sg function for dma_ops (handles scatter-gather
  * lists).
@@ -2875,9 +2857,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
     INC_STATS_COUNTER(cnt_map_sg);

     domain = get_domain(dev);
-    if (PTR_ERR(domain) == -EINVAL)
-        return map_sg_no_iommu(dev, sglist, nelems, dir);
-    else if (IS_ERR(domain))
+    if (IS_ERR(domain))
         return 0;

     dma_mask = *dev->dma_mask;
@@ -3410,7 +3390,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 }

 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
-                                          unsigned long iova)
+                                          dma_addr_t iova)
 {
     struct protection_domain *domain = dom->priv;
     unsigned long offset_mask;
@@ -3947,6 +3927,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
     if (!table)
         goto out;

+    /* Initialize table spin-lock */
+    spin_lock_init(&table->lock);
+
     if (ioapic)
         /* Keep the first 32 indexes free for IOAPIC interrupts */
         table->min_index = 32;
@@ -4007,7 +3990,7 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
             c = 0;

         if (c == count) {
-            struct irq_2_iommu *irte_info;
+            struct irq_2_irte *irte_info;

             for (; c != 0; --c)
                 table->table[index - c + 1] = IRTE_ALLOCATED;
@@ -4015,9 +3998,9 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
             index -= count - 1;

             cfg->remapped = 1;
-            irte_info = &cfg->irq_2_iommu;
-            irte_info->sub_handle = devid;
-            irte_info->irte_index = index;
+            irte_info = &cfg->irq_2_irte;
+            irte_info->devid = devid;
+            irte_info->index = index;

             goto out;
         }
@@ -4098,7 +4081,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
                               struct io_apic_irq_attr *attr)
 {
     struct irq_remap_table *table;
-    struct irq_2_iommu *irte_info;
+    struct irq_2_irte *irte_info;
     struct irq_cfg *cfg;
     union irte irte;
     int ioapic_id;
@@ -4110,7 +4093,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
     if (!cfg)
         return -EINVAL;

-    irte_info = &cfg->irq_2_iommu;
+    irte_info = &cfg->irq_2_irte;
     ioapic_id = mpc_ioapic_id(attr->ioapic);
     devid = get_ioapic_devid(ioapic_id);

@@ -4125,8 +4108,8 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,

     /* Setup IRQ remapping info */
     cfg->remapped = 1;
-    irte_info->sub_handle = devid;
-    irte_info->irte_index = index;
+    irte_info->devid = devid;
+    irte_info->index = index;

     /* Setup IRTE for IOMMU */
     irte.val = 0;
@@ -4160,7 +4143,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 static int set_affinity(struct irq_data *data, const struct cpumask *mask,
                         bool force)
 {
-    struct irq_2_iommu *irte_info;
+    struct irq_2_irte *irte_info;
     unsigned int dest, irq;
     struct irq_cfg *cfg;
     union irte irte;
@@ -4171,12 +4154,12 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,

     cfg = data->chip_data;
     irq = data->irq;
-    irte_info = &cfg->irq_2_iommu;
+    irte_info = &cfg->irq_2_irte;

     if (!cpumask_intersects(mask, cpu_online_mask))
         return -EINVAL;

-    if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+    if (get_irte(irte_info->devid, irte_info->index, &irte))
         return -EBUSY;

     if (assign_irq_vector(irq, cfg, mask))
@@ -4192,7 +4175,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
     irte.fields.vector = cfg->vector;
     irte.fields.destination = dest;

-    modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+    modify_irte(irte_info->devid, irte_info->index, irte);

     if (cfg->move_in_progress)
         send_cleanup_vector(cfg);
@@ -4204,16 +4187,16 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,

 static int free_irq(int irq)
 {
-    struct irq_2_iommu *irte_info;
+    struct irq_2_irte *irte_info;
     struct irq_cfg *cfg;

     cfg = irq_get_chip_data(irq);
     if (!cfg)
         return -EINVAL;

-    irte_info = &cfg->irq_2_iommu;
+    irte_info = &cfg->irq_2_irte;

-    free_irte(irte_info->sub_handle, irte_info->irte_index);
+    free_irte(irte_info->devid, irte_info->index);

     return 0;
 }
@@ -4222,7 +4205,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
                             unsigned int irq, unsigned int dest,
                             struct msi_msg *msg, u8 hpet_id)
 {
-    struct irq_2_iommu *irte_info;
+    struct irq_2_irte *irte_info;
     struct irq_cfg *cfg;
     union irte irte;

@@ -4230,7 +4213,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
     if (!cfg)
         return;

-    irte_info = &cfg->irq_2_iommu;
+    irte_info = &cfg->irq_2_irte;

     irte.val = 0;
     irte.fields.vector = cfg->vector;
@@ -4239,11 +4222,11 @@ static void compose_msi_msg(struct pci_dev *pdev,
     irte.fields.dm = apic->irq_dest_mode;
     irte.fields.valid = 1;

-    modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+    modify_irte(irte_info->devid, irte_info->index, irte);

     msg->address_hi = MSI_ADDR_BASE_HI;
     msg->address_lo = MSI_ADDR_BASE_LO;
-    msg->data = irte_info->irte_index;
+    msg->data = irte_info->index;
 }

 static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
@@ -4268,7 +4251,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
 static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
                          int index, int offset)
 {
-    struct irq_2_iommu *irte_info;
+    struct irq_2_irte *irte_info;
     struct irq_cfg *cfg;
     u16 devid;

@@ -4283,18 +4266,18 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
         return 0;

     devid = get_device_id(&pdev->dev);
-    irte_info = &cfg->irq_2_iommu;
+    irte_info = &cfg->irq_2_irte;

     cfg->remapped = 1;
-    irte_info->sub_handle = devid;
-    irte_info->irte_index = index + offset;
+    irte_info->devid = devid;
+    irte_info->index = index + offset;

     return 0;
 }

 static int setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-    struct irq_2_iommu *irte_info;
+    struct irq_2_irte *irte_info;
     struct irq_cfg *cfg;
     int index, devid;

@@ -4302,7 +4285,7 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
     if (!cfg)
         return -EINVAL;

-    irte_info = &cfg->irq_2_iommu;
+    irte_info = &cfg->irq_2_irte;
     devid = get_hpet_devid(id);
     if (devid < 0)
         return devid;
@@ -4312,8 +4295,8 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
         return index;

     cfg->remapped = 1;
-    irte_info->sub_handle = devid;
-    irte_info->irte_index = index;
+    irte_info->devid = devid;
+    irte_info->index = index;

     return 0;
 }

@@ -213,6 +213,14 @@ enum iommu_init_state {
     IOMMU_INIT_ERROR,
 };

+/* Early ioapic and hpet maps from kernel command line */
+#define EARLY_MAP_SIZE 4
+static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
+static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static int __initdata early_ioapic_map_size;
+static int __initdata early_hpet_map_size;
+static bool __initdata cmdline_maps;
+
 static enum iommu_init_state init_state = IOMMU_START_STATE;

 static int amd_iommu_enable_interrupts(void);
@@ -703,31 +711,66 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
     set_iommu_for_device(iommu, devid);
 }

-static int add_special_device(u8 type, u8 id, u16 devid)
+static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
 {
     struct devid_map *entry;
     struct list_head *list;

-    if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+    if (type == IVHD_SPECIAL_IOAPIC)
+        list = &ioapic_map;
+    else if (type == IVHD_SPECIAL_HPET)
+        list = &hpet_map;
+    else
         return -EINVAL;

+    list_for_each_entry(entry, list, list) {
+        if (!(entry->id == id && entry->cmd_line))
+            continue;
+
+        pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
+            type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+
+        return 0;
+    }
+
     entry = kzalloc(sizeof(*entry), GFP_KERNEL);
     if (!entry)
         return -ENOMEM;

     entry->id = id;
     entry->devid = devid;
+    entry->cmd_line = cmd_line;

-    if (type == IVHD_SPECIAL_IOAPIC)
-        list = &ioapic_map;
-    else
-        list = &hpet_map;
-
     list_add_tail(&entry->list, list);

     return 0;
 }

+static int __init add_early_maps(void)
+{
+    int i, ret;
+
+    for (i = 0; i < early_ioapic_map_size; ++i) {
+        ret = add_special_device(IVHD_SPECIAL_IOAPIC,
+                                 early_ioapic_map[i].id,
+                                 early_ioapic_map[i].devid,
+                                 early_ioapic_map[i].cmd_line);
+        if (ret)
+            return ret;
+    }
+
+    for (i = 0; i < early_hpet_map_size; ++i) {
+        ret = add_special_device(IVHD_SPECIAL_HPET,
+                                 early_hpet_map[i].id,
+                                 early_hpet_map[i].devid,
+                                 early_hpet_map[i].cmd_line);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
 /*
  * Reads the device exclusion range from ACPI and initializes the IOMMU with
  * it
@@ -764,6 +807,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
     u32 dev_i, ext_flags = 0;
     bool alias = false;
     struct ivhd_entry *e;
+    int ret;
+
+
+    ret = add_early_maps();
+    if (ret)
+        return ret;

     /*
      * First save the recommended feature enable bits from ACPI
@@ -929,7 +978,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                 PCI_FUNC(devid));

             set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-            ret = add_special_device(type, handle, devid);
+            ret = add_special_device(type, handle, devid, false);
             if (ret)
                 return ret;
             break;
@@ -1275,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
                                    amd_iommu_int_handler,
                                    amd_iommu_int_thread,
                                    0, "AMD-Vi",
-                                   iommu->dev);
+                                   iommu);

     if (r) {
         pci_disable_msi(iommu->dev);
@@ -1638,18 +1687,28 @@ static void __init free_on_init_error(void)

 static bool __init check_ioapic_information(void)
 {
+    const char *fw_bug = FW_BUG;
     bool ret, has_sb_ioapic;
     int idx;

     has_sb_ioapic = false;
     ret = false;

+    /*
+     * If we have map overrides on the kernel command line the
+     * messages in this function might not describe firmware bugs
+     * anymore - so be careful
+     */
+    if (cmdline_maps)
+        fw_bug = "";
+
     for (idx = 0; idx < nr_ioapics; idx++) {
         int devid, id = mpc_ioapic_id(idx);

         devid = get_ioapic_devid(id);
         if (devid < 0) {
-            pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+            pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
+                fw_bug, id);
             ret = false;
         } else if (devid == IOAPIC_SB_DEVID) {
             has_sb_ioapic = true;
@@ -1666,11 +1725,11 @@ static bool __init check_ioapic_information(void)
          * when the BIOS is buggy and provides us the wrong
          * device id for the IOAPIC in the system.
          */
-        pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+        pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
     }

     if (!ret)
-        pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+        pr_err("AMD-Vi: Disabling interrupt remapping\n");

     return ret;
 }
@@ -1801,6 +1860,7 @@ static int __init early_amd_iommu_init(void)
      * Interrupt remapping enabled, create kmem_cache for the
      * remapping tables.
      */
+    ret = -ENOMEM;
     amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
                             MAX_IRQS_PER_TABLE * sizeof(u32),
                             IRQ_TABLE_ALIGNMENT,
@@ -2097,8 +2157,70 @@ static int __init parse_amd_iommu_options(char *str)
     return 1;
 }

+static int __init parse_ivrs_ioapic(char *str)
+{
+    unsigned int bus, dev, fn;
+    int ret, id, i;
+    u16 devid;
+
+    ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+    if (ret != 4) {
+        pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
+        return 1;
+    }
+
+    if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+        pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+            str);
+        return 1;
+    }
+
+    devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+    cmdline_maps = true;
+    i = early_ioapic_map_size++;
+    early_ioapic_map[i].id = id;
+    early_ioapic_map[i].devid = devid;
+    early_ioapic_map[i].cmd_line = true;
+
+    return 1;
+}
+
+static int __init parse_ivrs_hpet(char *str)
+{
+    unsigned int bus, dev, fn;
+    int ret, id, i;
+    u16 devid;
+
+    ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+    if (ret != 4) {
+        pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
+        return 1;
+    }
+
+    if (early_hpet_map_size == EARLY_MAP_SIZE) {
+        pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
+            str);
+        return 1;
+    }
+
+    devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+    cmdline_maps = true;
+    i = early_hpet_map_size++;
+    early_hpet_map[i].id = id;
+    early_hpet_map[i].devid = devid;
+    early_hpet_map[i].cmd_line = true;
+
+    return 1;
+}
+
 __setup("amd_iommu_dump", parse_amd_iommu_dump);
 __setup("amd_iommu=", parse_amd_iommu_options);
+__setup("ivrs_ioapic", parse_ivrs_ioapic);
+__setup("ivrs_hpet", parse_ivrs_hpet);

 IOMMU_INIT_FINISH(amd_iommu_detect,
                   gart_iommu_hole_init,

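For reference, the devid packing used by parse_ivrs_ioapic()/parse_ivrs_hpet() above follows the usual PCI bus/device/function layout; a quick worked example (values taken from the documentation hunk, not from the patch itself): 00:14.0 gives devid = (0x00 << 8) | (0x14 << 3) | 0x0 = 0xa0.
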
@@ -100,6 +100,7 @@
 #define PASID_MASK 0x000fffff

 /* MMIO status bits */
+#define MMIO_STATUS_EVT_INT_MASK      (1 << 1)
 #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
 #define MMIO_STATUS_PPR_INT_MASK      (1 << 6)

@@ -589,6 +590,7 @@ struct devid_map {
     struct list_head list;
     u8 id;
     u16 devid;
+    bool cmd_line;
 };

 /* Map HPET and IOAPIC ids to the devid used by the IOMMU */

@@ -646,7 +646,7 @@ out:
 int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
     struct intel_iommu *iommu;
-    u32 ver;
+    u32 ver, sts;
     static int iommu_allocated = 0;
     int agaw = 0;
     int msagaw = 0;
@@ -696,6 +696,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         (unsigned long long)iommu->cap,
         (unsigned long long)iommu->ecap);

+    /* Reflect status in gcmd */
+    sts = readl(iommu->reg + DMAR_GSTS_REG);
+    if (sts & DMA_GSTS_IRES)
+        iommu->gcmd |= DMA_GCMD_IRE;
+    if (sts & DMA_GSTS_TES)
+        iommu->gcmd |= DMA_GCMD_TE;
+    if (sts & DMA_GSTS_QIES)
+        iommu->gcmd |= DMA_GCMD_QIE;
+
     raw_spin_lock_init(&iommu->register_lock);

     drhd->iommu = iommu;
@@ -1205,7 +1214,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)

     /* TBD: ignore advanced fault log currently */
     if (!(fault_status & DMA_FSTS_PPF))
-        goto clear_rest;
+        goto unlock_exit;

     fault_index = dma_fsts_fault_record_index(fault_status);
     reg = cap_fault_reg_offset(iommu->cap);
@@ -1246,11 +1255,10 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
         fault_index = 0;
         raw_spin_lock_irqsave(&iommu->register_lock, flag);
     }
-clear_rest:
-    /* clear all the other faults */
-    fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-    writel(fault_status, iommu->reg + DMAR_FSTS_REG);

+    writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+
+unlock_exit:
     raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
     return IRQ_HANDLED;
 }
@@ -1298,6 +1306,7 @@ int __init enable_drhd_fault_handling(void)
     for_each_drhd_unit(drhd) {
         int ret;
         struct intel_iommu *iommu = drhd->iommu;
+        u32 fault_status;
         ret = dmar_set_interrupt(iommu);

         if (ret) {
@@ -1310,6 +1319,8 @@ int __init enable_drhd_fault_handling(void)
          * Clear any previous faults.
          */
         dmar_fault(iommu->irq, iommu);
+        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+        writel(fault_status, iommu->reg + DMAR_FSTS_REG);
     }

     return 0;

@@ -1027,7 +1027,7 @@ done:
 }

 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
-                                             unsigned long iova)
+                                             dma_addr_t iova)
 {
     struct exynos_iommu_domain *priv = domain->priv;
     unsigned long *entry;

@@ -47,6 +47,7 @@
 #include <asm/iommu.h>

 #include "irq_remapping.h"
+#include "pci.h"

 #define ROOT_SIZE    VTD_PAGE_SIZE
 #define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -3665,6 +3666,7 @@ static struct notifier_block device_nb = {
 int __init intel_iommu_init(void)
 {
     int ret = 0;
+    struct dmar_drhd_unit *drhd;

     /* VT-d is required for a TXT/tboot launch, so enforce that */
     force_on = tboot_force_iommu();
@@ -3675,6 +3677,20 @@ int __init intel_iommu_init(void)
         return -ENODEV;
     }

+    /*
+     * Disable translation if already enabled prior to OS handover.
+     */
+    for_each_drhd_unit(drhd) {
+        struct intel_iommu *iommu;
+
+        if (drhd->ignored)
+            continue;
+
+        iommu = drhd->iommu;
+        if (iommu->gcmd & DMA_GCMD_TE)
+            iommu_disable_translation(iommu);
+    }
+
     if (dmar_dev_scope_init() < 0) {
         if (force_on)
             panic("tboot: Failed to initialize DMAR device scope\n");
@@ -4111,7 +4127,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 }

 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
-                                            unsigned long iova)
+                                            dma_addr_t iova)
 {
     struct dmar_domain *dmar_domain = domain->priv;
     struct dma_pte *pte;
@@ -4137,12 +4153,6 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
     return 0;
 }

-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
-    pci_dev_put(*from);
-    *from = to;
-}
-
 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

 static int intel_iommu_add_device(struct device *dev)

@@ -524,6 +524,16 @@ static int __init intel_irq_remapping_supported(void)

     if (disable_irq_remap)
         return 0;
+    if (irq_remap_broken) {
+        WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+            "This system BIOS has enabled interrupt remapping\n"
+            "on a chipset that contains an erratum making that\n"
+            "feature unstable. To maintain system stability\n"
+            "interrupt remapping is being disabled. Please\n"
+            "contact your BIOS vendor for an update\n");
+        disable_irq_remap = 1;
+        return 0;
+    }

     if (!dmar_ir_support())
         return 0;

@@ -204,6 +204,35 @@ again:
 }
 EXPORT_SYMBOL_GPL(iommu_group_alloc);

+struct iommu_group *iommu_group_get_by_id(int id)
+{
+    struct kobject *group_kobj;
+    struct iommu_group *group;
+    const char *name;
+
+    if (!iommu_group_kset)
+        return NULL;
+
+    name = kasprintf(GFP_KERNEL, "%d", id);
+    if (!name)
+        return NULL;
+
+    group_kobj = kset_find_obj(iommu_group_kset, name);
+    kfree(name);
+
+    if (!group_kobj)
+        return NULL;
+
+    group = container_of(group_kobj, struct iommu_group, kobj);
+    BUG_ON(group->id != id);
+
+    kobject_get(group->devices_kobj);
+    kobject_put(&group->kobj);
+
+    return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
+
 /**
  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
  * @group: the group
@@ -706,8 +735,7 @@ void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_detach_group);

-phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
-                               unsigned long iova)
+phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 {
     if (unlikely(domain->ops->iova_to_phys == NULL))
         return 0;
@@ -854,12 +882,13 @@ EXPORT_SYMBOL_GPL(iommu_unmap);


 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
-                               phys_addr_t paddr, u64 size)
+                               phys_addr_t paddr, u64 size, int prot)
 {
     if (unlikely(domain->ops->domain_window_enable == NULL))
         return -ENODEV;

-    return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size);
+    return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
+                                             prot);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

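A minimal sketch of how a caller might consume the newly exported iommu_group_get_by_id(); the function name example_use_group() and the chosen error code are illustrative assumptions, while the get/put pairing follows from the kobject_get() on group->devices_kobj in the hunk above, which iommu_group_put() releases.

#include <linux/iommu.h>
#include <linux/errno.h>

/* Hypothetical consumer: look up a group by its sysfs id, use it, drop it. */
static int example_use_group(int group_id)
{
    struct iommu_group *group;

    group = iommu_group_get_by_id(group_id);  /* holds a group reference */
    if (!group)
        return -ENODEV;                       /* no group with that id */

    /* ... attach the group to a domain, walk its devices, etc. ... */

    iommu_group_put(group);                   /* release the reference */
    return 0;
}
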
@@ -18,6 +18,7 @@
 int irq_remapping_enabled;

 int disable_irq_remap;
+int irq_remap_broken;
 int disable_sourceid_checking;
 int no_x2apic_optout;

@@ -210,6 +211,11 @@ void __init setup_irq_remapping_ops(void)
 #endif
 }

+void set_irq_remapping_broken(void)
+{
+    irq_remap_broken = 1;
+}
+
 int irq_remapping_supported(void)
 {
     if (disable_irq_remap)

@@ -32,6 +32,7 @@ struct pci_dev;
 struct msi_msg;

 extern int disable_irq_remap;
+extern int irq_remap_broken;
 extern int disable_sourceid_checking;
 extern int no_x2apic_optout;
 extern int irq_remapping_enabled;
@@ -89,6 +90,7 @@ extern struct irq_remap_ops amd_iommu_irq_ops;

 #define irq_remapping_enabled 0
 #define disable_irq_remap     1
+#define irq_remap_broken      0

 #endif /* CONFIG_IRQ_REMAP */

@@ -554,7 +554,7 @@ fail:
 }

 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
-                                          unsigned long va)
+                                          dma_addr_t va)
 {
     struct msm_priv *priv;
     struct msm_iommu_drvdata *iommu_drvdata;

@@ -1219,7 +1219,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
 }

 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
-                                           unsigned long da)
+                                           dma_addr_t da)
 {
     struct omap_iommu_domain *omap_domain = domain->priv;
     struct omap_iommu *oiommu = omap_domain->iommu_dev;

drivers/iommu/pci.h (new file, 29 lines)
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef __IOMMU_PCI_H
+#define __IOMMU_PCI_H
+
+/* Helper function for swapping pci device reference */
+static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+{
+    pci_dev_put(*from);
+    *from = to;
+}
+
+#endif /* __IOMMU_PCI_H */

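A sketch of the reference-juggling pattern the new helper is meant for, assuming a hypothetical caller that walks from a device up to its root bus; the function name and the loop are illustrative and not taken from this merge, only swap_pci_ref() itself comes from the new header.

#include <linux/pci.h>
#include "pci.h"

/*
 * Hypothetical helper: walk upstream from @pdev to the device sitting on a
 * root bus, holding exactly one PCI device reference at any time.  Each
 * swap_pci_ref() drops the reference on the current device and adopts the
 * one just taken on its parent bridge.
 */
static struct pci_dev *example_walk_to_root(struct pci_dev *pdev)
{
    struct pci_dev *dev = pci_dev_get(pdev);

    while (dev->bus->self)
        swap_pci_ref(&dev, pci_dev_get(dev->bus->self));

    return dev;    /* caller must pci_dev_put() the result */
}
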
@@ -296,7 +296,7 @@ done:
 }

 static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
-                                               unsigned long iova)
+                                               dma_addr_t iova)
 {
     struct shmobile_iommu_domain *sh_domain = domain->priv;
     uint32_t l1entry = 0, l2entry = 0;

@@ -279,7 +279,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 }

 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
-                                           unsigned long iova)
+                                           dma_addr_t iova)
 {
     struct gart_device *gart = domain->priv;
     unsigned long pte;
@@ -295,7 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,

     pa = (pte & GART_PAGE_MASK);
     if (!pfn_valid(__phys_to_pfn(pa))) {
-        dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa);
+        dev_err(gart->dev, "No entry for %08llx:%08x\n",
+            (unsigned long long)iova, pa);
         gart_dump_table(gart);
         return -EINVAL;
     }

@@ -757,7 +757,7 @@ static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 }

 static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
-                                           unsigned long iova)
+                                           dma_addr_t iova)
 {
     struct smmu_as *as = domain->priv;
     unsigned long *pte;
@@ -772,7 +772,8 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
     pfn = *pte & SMMU_PFN_MASK;
     WARN_ON(!pfn_valid(pfn));
     dev_dbg(as->smmu->dev,
-        "iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);
+        "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova,
+        pfn, as->asid);

     spin_unlock_irqrestore(&as->lock, flags);
     return PFN_PHYS(pfn);

@@ -20,6 +20,7 @@
 #define __LINUX_IOMMU_H

 #include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/types.h>

 #define IOMMU_READ (1)
@@ -91,8 +92,7 @@ struct iommu_ops {
                phys_addr_t paddr, size_t size, int prot);
     size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
                size_t size);
-    phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
-                unsigned long iova);
+    phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
     int (*domain_has_cap)(struct iommu_domain *domain,
                unsigned long cap);
     int (*add_device)(struct device *dev);
@@ -105,7 +105,7 @@ struct iommu_ops {

     /* Window handling functions */
     int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
-                phys_addr_t paddr, u64 size);
+                phys_addr_t paddr, u64 size, int prot);
     void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
     /* Set the numer of window per domain */
     int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
@@ -125,6 +125,7 @@ struct iommu_ops {
 extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
 extern bool iommu_present(struct bus_type *bus);
 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
+extern struct iommu_group *iommu_group_get_by_id(int id);
 extern void iommu_domain_free(struct iommu_domain *domain);
 extern int iommu_attach_device(struct iommu_domain *domain,
                                struct device *dev);
@@ -134,8 +135,7 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
                      phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                           size_t size);
-extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
-                                      unsigned long iova);
+extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
                                 unsigned long cap);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
@@ -171,7 +171,8 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,

 /* Window handling function prototypes */
 extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
-                                      phys_addr_t offset, u64 size);
+                                      phys_addr_t offset, u64 size,
+                                      int prot);
 extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
 /**
  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
@@ -257,7 +258,7 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,

 static inline int iommu_domain_window_enable(struct iommu_domain *domain,
                                              u32 wnd_nr, phys_addr_t paddr,
-                                             u64 size)
+                                             u64 size, int prot)
 {
     return -ENODEV;
 }
@@ -267,8 +268,7 @@ static inline void iommu_domain_window_disable(struct iommu_domain *domain,
 {
 }

-static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
-                                             unsigned long iova)
+static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 {
     return 0;
 }
