iommufd: Use GFP_KERNEL_ACCOUNT for iommu_map()
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain. Many drivers will allocate these at iommu_map() time and will trivially do the right thing if we pass in GFP_KERNEL_ACCOUNT. Reviewed-by: Kevin Tian <kevin.tian@intel.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/5-v3-76b587fe28df+6e3-iommu_map_gfp_jgg@nvidia.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
96d5780880
commit
e787a38e31
@ -457,7 +457,7 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
|
||||
|
||||
while (size) {
|
||||
rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
|
||||
GFP_KERNEL);
|
||||
GFP_KERNEL_ACCOUNT);
|
||||
if (rc)
|
||||
goto err_unmap;
|
||||
iova += PAGE_SIZE;
|
||||
@ -502,7 +502,7 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
|
||||
rc = iommu_map(domain, iova,
|
||||
PFN_PHYS(batch->pfns[cur]) + page_offset,
|
||||
next_iova - iova, area->iommu_prot,
|
||||
GFP_KERNEL);
|
||||
GFP_KERNEL_ACCOUNT);
|
||||
if (rc)
|
||||
goto err_unmap;
|
||||
iova = next_iova;
|
||||
|
Loading…
x
Reference in New Issue
Block a user