hw/arm/smmuv3-accel: Add support to issue invalidation cmd to host

Provide a helper and use it to issue the invalidation cmd to the host
SMMUv3. We only issue one cmd at a time for now.

Support for batching commands will be added later, after analysing the
impact.

Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
Message-id: 20260126104342.253965-20-skolothumtho@nvidia.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 58a789c389
parent 5ec2700dcb
Author:    Shameer Kolothum
Committer: Peter Maydell
Date:      2026-01-29 13:32:05 +00:00

3 changed files with 60 additions and 0 deletions
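At its core the change forwards a single guest command, unmodified, to the host
SMMUv3 through the iommufd vIOMMU cache-invalidation interface. Below is a
condensed sketch of that call shape, using the identifiers from the diff that
follows; the real helper additionally bails out early when there is no
accelerated vIOMMU or, for SID based commands, no vfio-pci vdev.

    /*
     * Condensed sketch, not the full helper: forward exactly one guest
     * command (Cmd cmd, as popped from the guest CMDQ) to the host SMMUv3
     * via the iommufd backend. entry_num is in/out; with a single entry
     * there is nothing further to check on return.
     */
    uint32_t entry_num = 1;

    if (!iommufd_backend_invalidate_cache(accel->viommu->iommufd,
                                          accel->viommu->viommu_id,
                                          IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
                                          sizeof(Cmd), &entry_num, &cmd, errp)) {
        /* callers map this failure to SMMU_CERROR_ILL on the guest queue */
    }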

@@ -233,6 +233,42 @@ bool smmuv3_accel_install_ste_range(SMMUv3State *s, SMMUSIDRange *range,
    return all_ok;
}

/*
 * This issues the invalidation cmd to the host SMMUv3.
 *
 * sdev is non-NULL for SID based invalidations (e.g. CFGI_CD), and NULL for
 * non SID invalidations such as SMMU_CMD_TLBI_NH_ASID and SMMU_CMD_TLBI_NH_VA.
 */
bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
                                Error **errp)
{
    SMMUv3State *s = ARM_SMMUV3(bs);
    SMMUv3AccelState *accel = s->s_accel;
    uint32_t entry_num = 1;

    /*
     * No accel or viommu means no VFIO/IOMMUFD devices, nothing to
     * invalidate.
     */
    if (!accel || !accel->viommu) {
        return true;
    }

    /*
     * SID based invalidations (e.g. CFGI_CD) apply only to vfio-pci endpoints
     * with a valid vIOMMU vdev.
     */
    if (sdev && !container_of(sdev, SMMUv3AccelDevice, sdev)->vdev) {
        return true;
    }

    /* Single command (entry_num = 1); no need to check returned entry_num */
    return iommufd_backend_invalidate_cache(
        accel->viommu->iommufd, accel->viommu->viommu_id,
        IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
        sizeof(Cmd), &entry_num, cmd, errp);
}

static bool
smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
                          Error **errp)

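The commit message defers batching. Because iommufd_backend_invalidate_cache()
takes the entry count by pointer and updates it with the number of commands the
host actually processed (which is why the single-command path above notes it
need not be checked), a batched variant would mostly be a matter of submitting
an array and comparing that count. A purely hypothetical sketch, not part of
this commit; the batch array, num_queued and fill_batch() below are
illustrative assumptions:

    /*
     * Hypothetical batching sketch (not in this commit): submit several
     * guest commands at once and detect partial consumption through the
     * returned entry count.
     */
    Cmd batch[32];                               /* illustrative batch size */
    uint32_t num_queued = fill_batch(batch);     /* hypothetical helper */
    uint32_t entry_num = num_queued;             /* in: submitted, out: consumed */

    if (!iommufd_backend_invalidate_cache(accel->viommu->iommufd,
                                          accel->viommu->viommu_id,
                                          IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
                                          sizeof(Cmd), &entry_num, batch, errp) ||
        entry_num != num_queued) {
        /* flag an error against the first command the host did not consume */
    }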
@@ -47,6 +47,8 @@ bool smmuv3_accel_install_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
bool smmuv3_accel_install_ste_range(SMMUv3State *s, SMMUSIDRange *range,
                                    Error **errp);
bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp);
bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
                                Error **errp);
void smmuv3_accel_reset(SMMUv3State *s);
#else
static inline void smmuv3_accel_init(SMMUv3State *s)
@@ -68,6 +70,12 @@ static inline bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp)
{
    return true;
}
static inline bool
smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
                           Error **errp)
{
    return true;
}
static inline void smmuv3_accel_reset(SMMUv3State *s)
{
}

@@ -1388,6 +1388,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, Error **errp)
            trace_smmuv3_cmdq_cfgi_cd(sid);
            smmuv3_flush_config(sdev);
            if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, errp)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
@@ -1411,6 +1415,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, Error **errp)
            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
            if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, errp)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
@@ -1438,6 +1446,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, Error **errp)
            trace_smmuv3_cmdq_tlbi_nsnh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, errp)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
@@ -1446,6 +1458,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, Error **errp)
                break;
            }
            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
            if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, errp)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            break;
        case SMMU_CMD_TLBI_S12_VMALL:
        {
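At each of the four call sites above, the ordering is the same: the emulated
state is invalidated first (config flush, IOTLB/notifier invalidation, or range
invalidation), and the command is then mirrored to the host. If the host
rejects it, the command queue raises SMMU_CERROR_ILL and stops consuming
further commands, matching how smmuv3_cmdq_consume() already reports other
command errors.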