// SPDX-License-Identifier: GPL-2.0

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>

#include <net/net_debug.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/sock.h>

#include "page_pool_priv.h"
#include "netdev-genl-gen.h"

static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
 * Ordering: inside rtnl_lock
 */
static DEFINE_MUTEX(page_pools_lock);

/* Page pools are only reachable from user space (via netlink) if they are
 * linked to a netdev at creation time. The following page pool "visibility"
 * states are possible:
 *  - normal
 *    - user.list: linked to real netdev, netdev: real netdev
 *  - orphaned - real netdev has disappeared
 *    - user.list: linked to lo, netdev: lo
 *  - invisible - either (a) created without netdev linking, (b) unlisted due
 *    to error, or (c) the entire namespace which owned this pool disappeared
 *    - user.list: unhashed, netdev: unknown
 */

typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
			     const struct genl_info *info);
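
/* Resolve @id and send back a single reply object built by @fill.
 * Pools unhashed from user.list ("invisible") or owned by a different
 * network namespace than the requesting socket are treated as absent.
 */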
static int
netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
{
	struct page_pool *pool;
	struct sk_buff *rsp;
	int err;

	mutex_lock(&page_pools_lock);
	pool = xa_load(&page_pools, id);
	if (!pool || hlist_unhashed(&pool->user.list) ||
	    !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
		err = -ENOENT;
		goto err_unlock;
	}

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = fill(rsp, pool, info);
	if (err)
		goto err_free_msg;

	mutex_unlock(&page_pools_lock);

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}
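
/* Dump resume state, stored in netlink_callback->ctx: the ifindex to
 * restart the netdev walk from, and the id of the pool to restart the
 * per-netdev pool walk from.
 */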
struct page_pool_dump_cb {
	unsigned long ifindex;
	u32 pp_id;
};
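
/* Walk all netdevs in the netns and all pools linked to each, filling one
 * object per pool. Pools are added at the list head, so they are seen
 * newest-first with (roughly) descending ids; on resume, pools with an id
 * above state->pp_id were dumped already and are skipped.
 */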
static int
netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
			     pp_nl_fill_cb fill)
{
	struct page_pool_dump_cb *state = (void *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct page_pool *pool;
	int err = 0;

	rtnl_lock();
	mutex_lock(&page_pools_lock);
	for_each_netdev_dump(net, netdev, state->ifindex) {
		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
			if (state->pp_id && state->pp_id < pool->user.id)
				continue;

			state->pp_id = pool->user.id;
			err = fill(skb, pool, info);
			if (err)
				goto out;
		}

		state->pp_id = 0;
	}
out:
	mutex_unlock(&page_pools_lock);
	rtnl_unlock();

	if (skb->len && err == -EMSGSIZE)
		return skb->len;
	return err;
}
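
/* Fill one pool's stats object: an id/ifindex nest under
 * NETDEV_A_PAGE_POOL_STATS_INFO followed by the allocation and recycling
 * counters. Without CONFIG_PAGE_POOL_STATS the handler only sets an
 * extack message and returns -EOPNOTSUPP.
 */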
static int
page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
			const struct genl_info *info)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	struct nlattr *nest;
	void *hdr;

	if (!page_pool_get_stats(pool, &stats))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			 pool->slow.netdev->ifindex)))
		goto err_cancel_nest;

	nla_nest_end(rsp, nest);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
			 stats.alloc_stats.fast) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
			 stats.alloc_stats.slow) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
			 stats.alloc_stats.slow_high_order) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
			 stats.alloc_stats.empty) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
			 stats.alloc_stats.refill) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
			 stats.alloc_stats.waive) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
			 stats.recycle_stats.cached) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
			 stats.recycle_stats.cache_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
			 stats.recycle_stats.ring) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
			 stats.recycle_stats.ring_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
			 stats.recycle_stats.released_refcnt))
		goto err_cancel_msg;

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_nest:
	nla_nest_cancel(rsp, nest);
err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
#else
	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
	return -EOPNOTSUPP;
#endif
}
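
/* Handle a page-pool-stats-get request. The pool is selected via the
 * nested STATS_INFO attribute; only selection by pool id is supported.
 * For example, with the in-tree ynl CLI (paths assumed, not verified here):
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *           --do page-pool-stats-get --json '{"info": {"id": 1}}'
 */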
int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
	struct nlattr *nest;
	int err;
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
		return -EINVAL;

	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
			       netdev_page_pool_info_nl_policy,
			       info->extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;
	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
				    "selecting by ifindex not supported");
		return -EINVAL;
	}

	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
}

int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
					 struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
}
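
/* Fill one page pool object: id, ifindex (skipped when the pool is bound
 * to the loopback device, i.e. orphaned), NAPI id if any, in-flight pages
 * and bytes, and the detach time once the driver has released the pool.
 */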
static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
		  const struct genl_info *info)
{
	size_t inflight, refsz;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
		goto err_cancel;

	if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			pool->slow.netdev->ifindex))
		goto err_cancel;
	if (pool->user.napi_id &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
		goto err_cancel;

	inflight = page_pool_inflight(pool, false);
	refsz = PAGE_SIZE << pool->p.order;
	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
			 inflight * refsz))
		goto err_cancel;

	if (pool->user.detach_time &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
			 pool->user.detach_time))
		goto err_cancel;

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}
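
/* Send a notification about @pool to the page-pool multicast group.
 * Skipped for invisible (unhashed) pools and when nobody is listening
 * in the pool's netns.
 */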
static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;
	struct net *net;

	lockdep_assert_held(&page_pools_lock);

	/* 'invisible' page pools don't matter */
	if (hlist_unhashed(&pool->user.list))
		return;
	net = dev_net(pool->slow.netdev);

	if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (page_pool_nl_fill(ntf, pool, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
				0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
}
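
/* Handle a page-pool-get request for a single pool id. For example, with
 * the in-tree ynl CLI (paths assumed, not verified here):
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *           --do page-pool-get --json '{"id": 1}'
 */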
int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;

	id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
}

int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
}
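
/* Called when a pool is created: allocate a user-visible id and, if the
 * pool was created with a netdev, link it to that netdev's list and
 * announce it. Pools without a netdev stay invisible to user space.
 */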
int page_pool_list(struct page_pool *pool)
{
	static u32 id_alloc_next;
	int err;

	mutex_lock(&page_pools_lock);
	err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
			      &id_alloc_next, GFP_KERNEL);
	if (err < 0)
		goto err_unlock;

	INIT_HLIST_NODE(&pool->user.list);
	if (pool->slow.netdev) {
		hlist_add_head(&pool->user.list,
			       &pool->slow.netdev->page_pools);
		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;

		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
	}

	mutex_unlock(&page_pools_lock);
	return 0;

err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}
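
/* Record when the driver released the pool; reported to user space via
 * the DETACH_TIME attribute so lingering pools with in-flight pages can
 * be spotted.
 */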
void page_pool_detached(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	pool->user.detach_time = ktime_get_boottime_seconds();
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
	mutex_unlock(&page_pools_lock);
}
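
/* Final teardown of the user-facing state: send the DEL notification and
 * drop the pool from the id xarray and its netdev's list.
 */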
void page_pool_unlist(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
	xa_erase(&page_pools, pool->user.id);
	if (!hlist_unhashed(&pool->user.list))
		hlist_del(&pool->user.list);
	mutex_unlock(&page_pools_lock);
}
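
/* Loopback is unregistering, i.e. its netns is being dismantled, so the
 * pools parked on it can never be reached again: unhash them and poison
 * the netdev pointer.
 */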
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
	struct page_pool *pool;
	struct hlist_node *n;

	mutex_lock(&page_pools_lock);
	hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
		hlist_del_init(&pool->user.list);
		pool->slow.netdev = NET_PTR_POISON;
	}
	mutex_unlock(&page_pools_lock);
}
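
/* A real netdev is unregistering: re-home its remaining pools onto the
 * netns's loopback device and notify listeners, keeping the orphaned
 * pools visible until they are destroyed.
 */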
static void page_pool_unreg_netdev(struct net_device *netdev)
{
	struct page_pool *pool, *last;
	struct net_device *lo;

	lo = dev_net(netdev)->loopback_dev;

	mutex_lock(&page_pools_lock);
	last = NULL;
	hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
		pool->slow.netdev = lo;
		netdev_nl_page_pool_event(pool,
					  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
		last = pool;
	}
	if (last)
		hlist_splice_init(&netdev->page_pools, &last->user.list,
				  &lo->page_pools);
	mutex_unlock(&page_pools_lock);
}
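
/* NETDEV_UNREGISTER notifier: decide whether a device's pools get
 * orphaned onto loopback or wiped (when loopback itself goes away).
 */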
static int
page_pool_netdevice_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	if (hlist_empty(&netdev->page_pools))
		return NOTIFY_OK;

	if (netdev->ifindex != LOOPBACK_IFINDEX)
		page_pool_unreg_netdev(netdev);
	else
		page_pool_unreg_netdev_wipe(netdev);
	return NOTIFY_OK;
}

static struct notifier_block page_pool_netdevice_nb = {
	.notifier_call = page_pool_netdevice_event,
};

static int __init page_pool_user_init(void)
{
	return register_netdevice_notifier(&page_pool_netdevice_nb);
}

subsys_initcall(page_pool_user_init);