/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver
					* gets from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV)
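
/*
 * A minimal sketch of the flag semantics: with PP_FLAG_DMA_MAP |
 * PP_FLAG_DMA_SYNC_DEV the pool maps pages and syncs them for the device,
 * while syncing for the CPU before reading RX data stays with the driver.
 * Assumes the page_pool_get_dma_addr() helper from the companion helpers
 * header; dev and pkt_len are hypothetical driver variables.
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_range_for_cpu(dev, dma, pool->p.offset, pkt_len,
 *				      pool->p.dma_dir);
 *	(the CPU may now safely parse the received packet)
 */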

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
 * ring is usually refilled, so at most 64 elements are consumed per
 * poll, which gives a natural maximum for the number of objects
 * needed in the cache.
 *
 * Room is kept for additional objects because of the XDP_DROP
 * use-case: XDP_DROP can recycle objects directly into this array,
 * as it runs under the same softirq/NAPI protection. If the cache
 * were already full (or close to it), those XDP_DROP recycles would
 * have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};
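
/*
 * A hedged sketch of the XDP_DROP recycle path this cache is sized for:
 * pages dropped inside the NAPI poll loop can go straight back into
 * pp_alloc_cache because allocation and drop share softirq protection.
 * Assumes the page_pool_recycle_direct() helper from the companion helpers
 * header; prog, xdp and rxq are hypothetical driver variables.
 *
 *	u32 act = bpf_prog_run_xdp(prog, &xdp);
 *
 *	if (act == XDP_DROP)
 *		page_pool_recycle_direct(rxq->page_pool, page);
 */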

/**
 * struct page_pool_params - page pool parameters
 * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
 * @order: 2^order pages on allocation
 * @pool_size: size of the ptr_ring
 * @nid: NUMA node id to allocate pages from
 * @dev: device, for DMA pre-mapping purposes
 * @napi: NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir: DMA mapping direction
 * @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 */
struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;
	struct device	*dev;
	struct napi_struct *napi;
	enum dma_data_direction dma_dir;
	unsigned int	max_len;
	unsigned int	offset;
/* private: used by test code only */
	void (*init_callback)(struct page *page, void *arg);
	void *init_arg;
};
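
/*
 * A hedged example of filling in these parameters, e.g. one pool per
 * RX-queue from a driver's queue setup path.  The sizes and the rq naming
 * are illustrative; page_pool_create() is declared later in this header
 * and returns an ERR_PTR() on failure.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= dev,
 *		.napi		= &rq->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */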

#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast: successful fast path allocations
 * @slow: slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty: ptr ring is empty, so a slow path allocation was forced
 * @refill: an allocation which triggered a refill of the cache
 * @waive: pages obtained from the ptr ring that cannot be added to
 *	   the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {
	u64 fast;
	u64 slow;
	u64 slow_high_order;
	u64 empty;
	u64 refill;
	u64 waive;
};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached: recycling placed page in the page pool cache
 * @cache_full: page pool cache was full
 * @ring: page placed into the ptr ring
 * @ring_full: page released from page pool because the ptr ring was full
 * @released_refcnt: page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {
	u64 cached;
	u64 cache_full;
	u64 ring;
	u64 ring_full;
	u64 released_refcnt;
};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats: see struct page_pool_alloc_stats
 * @recycle_stats: see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};
#endif
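
/*
 * A hedged example of consuming these stats, e.g. from an ethtool stats
 * callback.  Assumes the page_pool_get_stats() helper available when
 * CONFIG_PAGE_POOL_STATS is enabled; it accumulates both the allocation
 * stats and the per-cpu recycle stats into the caller-zeroed buffer, so
 * looping over queues sums the totals.  The rxq array is hypothetical.
 *
 *	struct page_pool_stats stats = {};
 *	int i;
 *
 *	for (i = 0; i < num_rx_queues; i++)
 *		page_pool_get_stats(rxq[i].page_pool, &stats);
 *
 *	(stats.alloc_stats.fast, stats.recycle_stats.ring, etc. now hold
 *	 totals across all RX queues)
 */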

struct page_pool {
	struct page_pool_params p;

	long frag_users;
	struct page *frag_page;
	unsigned int frag_offset;
	u32 pages_state_hold_cnt;

	struct delayed_work release_dw;
	void (*disconnect)(void *pool);
	unsigned long defer_start;
	unsigned long defer_warn;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for allocation side
	 *
	 * Drivers' allocation side usually already performs some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating one page_pool per
	 * RX-queue.  The RX-queue is already protected by softirq/BH
	 * scheduling and napi_schedule; NAPI scheduling guarantees
	 * that a single napi_struct will only be scheduled on a
	 * single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-wise,
	 * because frees can happen on remote CPUs, with no association
	 * with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue (which is
	 * protected by NAPI) because of the above pp_alloc_cache. This
	 * refcnt exists to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};
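
/*
 * A hedged note on the hold/release counters above: pages_state_hold_cnt
 * counts pages handed out by the pool and pages_state_release_cnt counts
 * pages returned to the page allocator, so the pool core can estimate the
 * number of in-flight pages roughly as:
 *
 *	s32 inflight = pool->pages_state_hold_cnt -
 *		       atomic_read(&pool->pages_state_release_cnt);
 *
 * A destroyed pool is only released once this reaches zero; until then
 * release_dw periodically re-checks and defer_warn paces the warnings.
 */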

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
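
/*
 * A hedged sketch of the allocators above inside an RX refill loop.
 * page_pool_alloc_pages() returns a whole (possibly high-order) page,
 * while page_pool_alloc_frag() hands out a sub-page fragment and reports
 * its position via @offset; the 2048-byte fragment size is illustrative.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
 *	if (!page)
 *		break;	(out of memory, retry the refill later)
 */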

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_unlink_napi(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_unlink_napi(struct page_pool *pool)
{
}

static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif
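
/*
 * A hedged sketch of teardown on the driver side; the rq naming is
 * illustrative.  page_pool_destroy() drops the user_cnt reference and, if
 * pages are still in flight, defers the final release via release_dw.
 * page_pool_put_page_bulk() is normally driven by the XDP core (e.g.
 * xdp_return_frame_bulk()) rather than called directly by drivers.
 *
 *	if (rq->page_pool) {
 *		page_pool_destroy(rq->page_pool);
 *		rq->page_pool = NULL;
 *	}
 */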

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size,
				  bool allow_direct);
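
/*
 * A hedged usage note: drivers rarely call page_pool_put_defragged_page()
 * directly; wrappers such as page_pool_put_page() and
 * page_pool_put_full_page() in the companion helpers header settle the
 * fragment refcount first and then fall through to this function.
 * dma_sync_size bounds how much of the page is synced for the device on
 * recycle, and -1 is treated as "up to pool->p.max_len".
 *
 *	(from NAPI context, so direct recycling into the cache is allowed)
 *	page_pool_put_full_page(pool, page, true);
 */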

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
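
/*
 * A hedged example of that safe context: a driver whose NAPI poll can run
 * on a CPU belonging to a different NUMA node may re-target the pool from
 * the poll loop itself (the page_pool_nid_changed() helper wraps the same
 * check).  The rq naming is illustrative.
 *
 *	(in the NAPI poll function, before refilling the RX ring)
 *	if (rq->page_pool->p.nid != numa_mem_id())
 *		page_pool_update_nid(rq->page_pool, numa_mem_id());
 */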

#endif /* _NET_PAGE_POOL_TYPES_H */