iommu/amd: Don't put completion-wait semaphore on stack
The semaphore used by the AMD IOMMU to signal command completion lived
on the stack until now, which was safe as the driver busy-waits on the
semaphore with IRQs disabled, so the stack can't go away under the
driver.

But the recently introduced vmap-based stacks break this, as the
physical address of the semaphore can't be determined easily anymore.
The driver used the __pa() macro, but that only works in the
direct mapping. The result was Completion-Wait timeout errors seen by
the IOMMU driver, breaking system boot.

Since putting the semaphore on the stack is bad design anyway, move the
semaphore into 'struct amd_iommu'. It is protected by the per-iommu
lock and is now in the direct mapping again. This fixes the
Completion-Wait timeout errors and makes AMD IOMMU systems boot again
with vmap-based stacks enabled.

Reported-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 15f4eae70d
commit 4bf5beef57
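For background on the failure mode described above: __pa() only translates addresses in the kernel direct mapping, while a vmap-based stack address would need to be resolved through its backing page. The sketch below illustrates that distinction; addr_to_phys() is a hypothetical helper written only for this note, not something the patch adds (the patch sidesteps the problem by moving the semaphore off the stack entirely).

/*
 * Background sketch, not part of the patch: __pa() is valid only for
 * direct-mapped addresses.  With CONFIG_VMAP_STACK the stack lives in
 * the vmalloc area, so its physical address must be found via the
 * backing page instead.  addr_to_phys() is a made-up helper for
 * illustration.
 */
#include <linux/mm.h>
#include <linux/pfn.h>

static phys_addr_t addr_to_phys(const void *addr)
{
	if (is_vmalloc_addr(addr))	/* vmap'ed stack / vmalloc area */
		return PFN_PHYS(page_to_pfn(vmalloc_to_page(addr))) +
		       offset_in_page(addr);

	return __pa(addr);		/* direct mapping, as the driver assumed */
}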
@@ -940,15 +940,13 @@ static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command.
  */
-static int iommu_queue_command_sync(struct amd_iommu *iommu,
-				    struct iommu_cmd *cmd,
-				    bool sync)
+static int __iommu_queue_command_sync(struct amd_iommu *iommu,
+				      struct iommu_cmd *cmd,
+				      bool sync)
 {
 	u32 left, tail, head, next_tail;
-	unsigned long flags;
 
 again:
-	spin_lock_irqsave(&iommu->lock, flags);
 
 	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
@@ -957,15 +955,14 @@ again:
 
 	if (left <= 2) {
 		struct iommu_cmd sync_cmd;
-		volatile u64 sem = 0;
 		int ret;
 
-		build_completion_wait(&sync_cmd, (u64)&sem);
-		copy_cmd_to_buffer(iommu, &sync_cmd, tail);
+		iommu->cmd_sem = 0;
 
-		spin_unlock_irqrestore(&iommu->lock, flags);
+		build_completion_wait(&sync_cmd, (u64)&iommu->cmd_sem);
+		copy_cmd_to_buffer(iommu, &sync_cmd, tail);
 
-		if ((ret = wait_on_sem(&sem)) != 0)
+		if ((ret = wait_on_sem(&iommu->cmd_sem)) != 0)
 			return ret;
 
 		goto again;
@@ -976,9 +973,21 @@ again:
 	/* We need to sync now to make sure all commands are processed */
 	iommu->need_sync = sync;
 
+	return 0;
+}
+
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+				    struct iommu_cmd *cmd,
+				    bool sync)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	ret = __iommu_queue_command_sync(iommu, cmd, sync);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	return 0;
+	return ret;
 }
 
 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
@@ -993,19 +1002,29 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
 	struct iommu_cmd cmd;
-	volatile u64 sem = 0;
+	unsigned long flags;
 	int ret;
 
 	if (!iommu->need_sync)
 		return 0;
 
-	build_completion_wait(&cmd, (u64)&sem);
 
-	ret = iommu_queue_command_sync(iommu, &cmd, false);
+	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	iommu->cmd_sem = 0;
+
+	ret = __iommu_queue_command_sync(iommu, &cmd, false);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
-	return wait_on_sem(&sem);
+	ret = wait_on_sem(&iommu->cmd_sem);
+
+out_unlock:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return ret;
 }
 
 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
@@ -524,6 +524,8 @@ struct amd_iommu {
 	struct irq_domain *ir_domain;
 	struct irq_domain *msi_domain;
 #endif
+
+	volatile u64 __aligned(8) cmd_sem;
 };
 
 #define ACPIHID_UID_LEN 256
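As context for the new cmd_sem field: the driver polls the semaphore in its wait_on_sem() helper while the per-IOMMU lock is held with IRQs off, and the completion-wait store is a 64-bit write, which is why the field carries __aligned(8). The sketch below is a paraphrased illustration of such a busy-wait, not the driver's exact code; MY_LOOP_TIMEOUT is a made-up bound for illustration.

/*
 * Rough sketch of the busy-wait referenced in the commit message
 * (paraphrased): poll the semaphore until the IOMMU's Completion-Wait
 * command stores a non-zero value to it, or give up after a bounded
 * number of iterations.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/printk.h>

#define MY_LOOP_TIMEOUT	100000	/* illustrative bound, not the driver's value */

static int wait_on_sem_sketch(volatile u64 *sem)
{
	int i = 0;

	while (*sem == 0 && i < MY_LOOP_TIMEOUT) {
		udelay(1);	/* spin with IRQs disabled, per the commit message */
		i += 1;
	}

	if (i == MY_LOOP_TIMEOUT) {
		pr_alert("Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}

Because cmd_sem now lives in struct amd_iommu, which is allocated from the regular kernel heap and therefore sits in the direct mapping, __pa() on its address is valid again, and the per-iommu lock serializes all users of the single semaphore.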