Lines Matching refs:bpf

27 struct nfp_app_bpf *bpf = nn->app->priv;
30 bpf->abi_version &&
31 nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
206 nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
210 struct nfp_cpp *cpp = bpf->app->pf->cpp;
217 bpf->adjust_head.flags = readl(&cap->flags);
218 bpf->adjust_head.off_min = readl(&cap->off_min);
219 bpf->adjust_head.off_max = readl(&cap->off_max);
220 bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
221 bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);
223 if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
227 if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
228 !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
230 memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
238 nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
243 nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
249 bpf->helpers.map_lookup = readl(&cap->func_addr);
252 bpf->helpers.map_update = readl(&cap->func_addr);
255 bpf->helpers.map_delete = readl(&cap->func_addr);
258 bpf->helpers.perf_event_output = readl(&cap->func_addr);
266 nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
271 nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
275 bpf->maps.types = readl(&cap->types);
276 bpf->maps.max_maps = readl(&cap->max_maps);
277 bpf->maps.max_elems = readl(&cap->max_elems);
278 bpf->maps.max_key_sz = readl(&cap->max_key_sz);
279 bpf->maps.max_val_sz = readl(&cap->max_val_sz);
280 bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
286 nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
289 bpf->pseudo_random = true;
294 nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
296 bpf->queue_select = true;
301 nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
304 bpf->adjust_tail = true;
309 nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value,
312 bpf->cmsg_multi_ent = true;
317 nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
321 nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
326 bpf->abi_version = readl(value);
327 if (bpf->abi_version < 2 || bpf->abi_version > 3) {
328 nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
329 bpf->abi_version);
330 bpf->abi_version = 0;
342 mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
418 static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
420 bpf->abi_version = 2; /* Original BPF ABI version */
425 struct nfp_app_bpf *bpf = app->priv;
427 return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
432 struct nfp_app_bpf *bpf = app->priv;
434 bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
439 struct nfp_app_bpf *bpf = app->priv;
441 if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) {
442 nfp_err(bpf->app->cpp,
444 app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf));
448 if (bpf->cmsg_multi_ent)
449 bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf);
451 bpf->cmsg_cache_cnt = 1;
458 struct nfp_app_bpf *bpf;
461 bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
462 if (!bpf)
464 bpf->app = app;
465 app->priv = bpf;
467 INIT_LIST_HEAD(&bpf->map_list);
469 err = nfp_ccm_init(&bpf->ccm, app);
473 err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
477 nfp_bpf_init_capabilities(bpf);
483 if (bpf->abi_version < 3) {
484 bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
485 bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
487 bpf->cmsg_key_sz = bpf->maps.max_key_sz;
488 bpf->cmsg_val_sz = bpf->maps.max_val_sz;
489 app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
492 bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf);
493 err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
500 rhashtable_destroy(&bpf->maps_neutral);
502 nfp_ccm_clean(&bpf->ccm);
504 kfree(bpf);
510 struct nfp_app_bpf *bpf = app->priv;
512 bpf_offload_dev_destroy(bpf->bpf_dev);
513 nfp_ccm_clean(&bpf->ccm);
514 WARN_ON(!list_empty(&bpf->map_list));
515 WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
516 rhashtable_free_and_destroy(&bpf->maps_neutral,
518 kfree(bpf);
545 .bpf = nfp_ndo_bpf,
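
For context on the hits above: the nfp_bpf_parse_cap_*() functions are leaf parsers invoked from a single walk over the firmware's "_abi_bpf_capabilities" symbol (mapped at the nfp_rtsym_map() line in the listing), and the ABI-version TLV they record is what nfp_net_ebpf_capable() later checks against NFP_NET_CFG_BPF_ABI. The code below is a hedged, userspace-only sketch of that TLV dispatch pattern, not the driver's code: the header layout, the type codes, and every struct/function name other than those quoted from the listing are assumptions made for illustration.

/*
 * Minimal sketch (illustrative assumptions only) of a capability-TLV walk
 * that dispatches to per-capability parsers, modelled on the
 * nfp_bpf_parse_cap_*() hits above.  The real driver reads the TLVs from a
 * memory-mapped rtsym with readl(); here we just memcpy from a buffer.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cap_tlv_hdr {                    /* assumed TLV header: type + length */
	uint32_t type;
	uint32_t length;
};

enum {                                  /* hypothetical capability type codes */
	CAP_TYPE_RANDOM      = 4,
	CAP_TYPE_ABI_VERSION = 5,
};

struct app_bpf_caps {                   /* stand-in for struct nfp_app_bpf */
	uint32_t abi_version;
	int pseudo_random;
};

static int parse_cap_abi_version(struct app_bpf_caps *bpf,
				 const uint8_t *value, uint32_t length)
{
	if (length < 4) {
		fprintf(stderr, "truncated ABI version TLV: %u\n", length);
		return -1;
	}
	memcpy(&bpf->abi_version, value, sizeof(bpf->abi_version));
	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
		fprintf(stderr, "unsupported BPF ABI version: %u\n",
			bpf->abi_version);
		bpf->abi_version = 0;   /* treat the device as not eBPF capable */
	}
	return 0;
}

static int parse_capabilities(struct app_bpf_caps *bpf,
			      const uint8_t *mem, size_t size)
{
	size_t off = 0;

	bpf->abi_version = 2;           /* default when no ABI-version TLV */

	while (off + sizeof(struct cap_tlv_hdr) <= size) {
		struct cap_tlv_hdr hdr;
		const uint8_t *value = mem + off + sizeof(hdr);

		memcpy(&hdr, mem + off, sizeof(hdr));
		if (off + sizeof(hdr) + hdr.length > size)
			return -1;      /* TLV overruns the capability blob */

		switch (hdr.type) {
		case CAP_TYPE_ABI_VERSION:
			if (parse_cap_abi_version(bpf, value, hdr.length))
				return -1;
			break;
		case CAP_TYPE_RANDOM:
			bpf->pseudo_random = 1;
			break;
		default:
			break;          /* unknown capabilities are skipped */
		}
		off += sizeof(hdr) + hdr.length;
	}
	return 0;
}

int main(void)
{
	/* One ABI-version TLV: type 5, length 4, value 3 (little-endian host). */
	uint8_t blob[12] = { 5, 0, 0, 0, 4, 0, 0, 0, 3, 0, 0, 0 };
	struct app_bpf_caps bpf = { 0 };

	if (!parse_capabilities(&bpf, blob, sizeof(blob)))
		printf("abi_version=%u pseudo_random=%d\n",
		       bpf.abi_version, bpf.pseudo_random);
	return 0;
}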