author		Jakub Kicinski <[email protected]>	2025-04-14 23:30:35 +0000
committer	Jakub Kicinski <[email protected]>	2025-04-14 23:30:36 +0000
commit		63ce43f2d7da1f863f43fb1bcc9422466887dc6c (patch)
tree		f45d18f63f5071e158df2f9cee8d6f1c7a4ca17a /net/core/netmem_priv.h
parent		Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/... (diff)
parent		page_pool: Track DMA-mapped pages and unmap them when destroying the pool (diff)
Merge branch 'fix-late-dma-unmap-crash-for-page-pool'
Toke Høiland-Jørgensen says:

====================
Fix late DMA unmap crash for page pool

This series fixes the late dma_unmap crash for page pool, first reported by
Yonglong Liu in [0]. It is an alternative approach to the one submitted by
Yunsheng Lin, most recently in [1].

The first commit just wraps some tests in a helper function, in preparation
for the main change in patch 2. See the commit message of patch 2 for the
details.

[0] https://lore.kernel.org/[email protected]
[1] https://lore.kernel.org/[email protected]

v8: https://lore.kernel.org/[email protected]
v7: https://lore.kernel.org/[email protected]
v6: https://lore.kernel.org/[email protected]
v5: https://lore.kernel.org/[email protected]
v4: https://lore.kernel.org/[email protected]
v3: https://lore.kernel.org/[email protected]
v2: https://lore.kernel.org/[email protected]
v1: https://lore.kernel.org/[email protected]
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
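As the second parent commit's subject ("page_pool: Track DMA-mapped pages and
unmap them when destroying the pool") describes, the core idea of the series is
to record every DMA-mapped page so the pool can unmap any still-outstanding
pages at destroy time, instead of crashing on a late unmap. A minimal sketch of
what such a destroy-time walk could look like, assuming a pool->dma_mapped
xarray as the tracking structure (the field name and exact unmap call here are
assumptions based on the series description, not a verbatim copy of the patch):

/* Hedged sketch: unmap any pages still tracked when the pool goes away.
 * pool->dma_mapped is an assumed xarray of DMA-mapped pages, keyed by the
 * index stored in pp_magic (see the accessors in the diff below).
 */
static void page_pool_unmap_leftovers(struct page_pool *pool)
{
	struct page *page;
	unsigned long id;

	xa_for_each(&pool->dma_mapped, id, page)
		dma_unmap_page_attrs(pool->p.dev,
				     page_pool_get_dma_addr(page),
				     PAGE_SIZE << pool->p.order,
				     pool->p.dma_dir,
				     DMA_ATTR_SKIP_CPU_SYNC);
}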
Diffstat (limited to 'net/core/netmem_priv.h')
-rw-r--r--	net/core/netmem_priv.h	33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
index 7eadb8393e00..cd95394399b4 100644
--- a/net/core/netmem_priv.h
+++ b/net/core/netmem_priv.h
@@ -5,7 +5,7 @@
 
 static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
 {
-	return __netmem_clear_lsb(netmem)->pp_magic;
+	return __netmem_clear_lsb(netmem)->pp_magic & ~PP_DMA_INDEX_MASK;
 }
 
 static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
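This getter change is what keeps the rest of the stack oblivious to the new
index bits: anything reading pp_magic through the accessor sees only the magic
bits, with the DMA index masked off. A standalone illustration of the masking,
using made-up constant values (the real PP_DMA_INDEX_* definitions are added
elsewhere in the series; the EX_ names here are invented for the example):

#include <stdio.h>

#define EX_PP_SIGNATURE		0x40UL
#define EX_PP_DMA_INDEX_SHIFT	8
#define EX_PP_DMA_INDEX_MASK	(0xffffffUL << EX_PP_DMA_INDEX_SHIFT)

int main(void)
{
	/* Store index 42 alongside the signature, as the series does in
	 * pp_magic. */
	unsigned long pp_magic = EX_PP_SIGNATURE |
				 (42UL << EX_PP_DMA_INDEX_SHIFT);

	/* The patched getter masks the index bits off, so existing
	 * signature checks keep working unchanged. */
	printf("magic: 0x%lx\n", pp_magic & ~EX_PP_DMA_INDEX_MASK);	/* 0x40 */
	printf("index: %lu\n",
	       (pp_magic & EX_PP_DMA_INDEX_MASK) >> EX_PP_DMA_INDEX_SHIFT); /* 42 */
	return 0;
}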
@@ -15,9 +15,16 @@ static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic
 
 static inline void netmem_clear_pp_magic(netmem_ref netmem)
 {
+	WARN_ON_ONCE(__netmem_clear_lsb(netmem)->pp_magic & PP_DMA_INDEX_MASK);
+
 	__netmem_clear_lsb(netmem)->pp_magic = 0;
 }
 
+static inline bool netmem_is_pp(netmem_ref netmem)
+{
+	return (netmem_get_pp_magic(netmem) & PP_MAGIC_MASK) == PP_SIGNATURE;
+}
+
 static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
 {
 	__netmem_clear_lsb(netmem)->pp = pool;
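netmem_is_pp() is the helper introduced by patch 1 of the series; it
centralizes the signature test that callers previously open-coded. For
comparison, the pre-series open-coded form in callers such as skbuff.c looked
roughly like the following (approximate, reconstructed from memory of the
older tree; the function name here is hypothetical):

/* Approximate pre-series pattern that netmem_is_pp() replaces. The low
 * bits were masked off because they can carry unrelated state, which is
 * what the new PP_MAGIC_MASK generalizes.
 */
static inline bool page_is_pp(struct page *page)
{
	return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
}

The WARN_ON_ONCE() added to netmem_clear_pp_magic() above guards the same
invariant from the other side: by the time the magic is cleared, the DMA index
bits must already have been dropped, i.e. the page must no longer be tracked
as DMA-mapped.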
@@ -28,4 +35,28 @@ static inline void netmem_set_dma_addr(netmem_ref netmem,
 {
 	__netmem_clear_lsb(netmem)->dma_addr = dma_addr;
 }
+
+static inline unsigned long netmem_get_dma_index(netmem_ref netmem)
+{
+	unsigned long magic;
+
+	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
+		return 0;
+
+	magic = __netmem_clear_lsb(netmem)->pp_magic;
+
+	return (magic & PP_DMA_INDEX_MASK) >> PP_DMA_INDEX_SHIFT;
+}
+
+static inline void netmem_set_dma_index(netmem_ref netmem,
+					unsigned long id)
+{
+	unsigned long magic;
+
+	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
+		return;
+
+	magic = netmem_get_pp_magic(netmem) | (id << PP_DMA_INDEX_SHIFT);
+	__netmem_clear_lsb(netmem)->pp_magic = magic;
+}
 #endif
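Taken together, the two accessors let the pool stash an xarray id in pp_magic
at map time and recover it at unmap time. A hedged sketch of how the pool side
could use them (pool->dma_mapped and both function names are assumptions based
on the series description; the real page_pool code in the series differs in
detail, e.g. in its id limits and locking):

/* Map time: allocate an id for the page and record it in pp_magic.
 * Ids start at 1 so that 0 can mean "no index stored".
 */
static int record_dma_mapping(struct page_pool *pool, netmem_ref netmem)
{
	u32 id;
	int err;

	err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
		       XA_LIMIT(1, UINT_MAX), GFP_ATOMIC);
	if (err)
		return err;

	netmem_set_dma_index(netmem, id);
	return 0;
}

/* Unmap time: look the id back up, drop the tracking entry, and clear
 * the index bits so netmem_clear_pp_magic()'s WARN_ON_ONCE() stays quiet.
 */
static void forget_dma_mapping(struct page_pool *pool, netmem_ref netmem)
{
	unsigned long id = netmem_get_dma_index(netmem);

	if (id) {
		xa_erase(&pool->dma_mapped, id);
		netmem_set_dma_index(netmem, 0);
	}
}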