author    Jason Gunthorpe <[email protected]>  2025-04-08 16:54:09 +0000
committer Joerg Roedel <[email protected]>          2025-04-17 14:22:51 +0000
commit    5087f663c21e8ac2af7bd178a9b8da50ff7752ac
tree      b650d88caa4eb5788a5d411f9c4fb0d771bbd5b2
parent    iommu/pages: Remove iommu_alloc_page/pages()
iommu/pages: Remove iommu_alloc_page_node()
Use iommu_alloc_pages_node_sz() instead. AMD and Intel are both using 4K
pages for these structures since those drivers only work on 4K PAGE_SIZE.
riscv is also spec'd to use SZ_4K.

Reviewed-by: Lu Baolu <[email protected]>
Tested-by: Alejandro Jimenez <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
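For illustration only, not part of the commit itself: a minimal sketch of the
conversion pattern at a call site, assuming the iommu_free_pages() counterpart
from the same series as the release path, with nid and tbl as hypothetical
names. The new helper takes the allocation size explicitly, so the single 4K
page that iommu_alloc_page_node() used to imply is now spelled out as SZ_4K:

	/* hypothetical call site; nid and tbl are illustrative names */
	void *tbl = iommu_alloc_pages_node_sz(nid, GFP_KERNEL, SZ_4K);

	if (!tbl)
		return -ENOMEM;

	/* ... install tbl as a context table / page table ... */

	iommu_free_pages(tbl);	/* assumed counterpart free from this series */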
Diffstat (limited to 'drivers/iommu/intel/iommu.c')
 drivers/iommu/intel/iommu.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 0de9580372a2..5d5291887516 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -397,7 +397,8 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 		if (!alloc)
 			return NULL;
 
-		context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+		context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+						    SZ_4K);
 		if (!context)
 			return NULL;
 
@@ -731,7 +732,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		if (!dma_pte_present(pte)) {
 			uint64_t pteval, tmp;
 
-			tmp_page = iommu_alloc_page_node(domain->nid, gfp);
+			tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
+							     SZ_4K);
 			if (!tmp_page)
 				return NULL;
 
@@ -982,7 +984,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
 	struct root_entry *root;
 
-	root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+	root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
 	if (!root) {
 		pr_err("Allocating root entry for %s failed\n",
 		       iommu->name);
@@ -2026,7 +2028,8 @@ static int copy_context_table(struct intel_iommu *iommu,
 			if (!old_ce)
 				goto out;
 
-			new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
+			new_ce = iommu_alloc_pages_node_sz(iommu->node,
+							   GFP_KERNEL, SZ_4K);
 			if (!new_ce)
 				goto out_unmap;
 
@@ -3359,7 +3362,7 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
 	domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
 
 	/* always allocate the top pgd */
-	domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+	domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
 	if (!domain->pgd) {
 		kfree(domain);
 		return ERR_PTR(-ENOMEM);