/*-
 * Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF main: dynamic load/initialisation and unload routines.
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf.c,v 1.44 2020/08/27 18:50:25 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/percpu.h>
#include <sys/xcall.h>
#endif

#include "npf_impl.h"
#include "npf_conn.h"

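/*
 * Active kernel instance of NPF, set via npf_setkernctx() and
 * obtained with npf_getkernctx().
 */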
static __read_mostly npf_t *	npf_kernel_ctx = NULL;

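/*
 * npfk_sysinit: initialise the global NPF subsystems (BPF, tablesets,
 * NAT, port maps) and set up the worker subsystem with the given
 * number of workers.
 */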
__dso_public int
npfk_sysinit(unsigned nworkers)
{

	npf_bpf_sysinit();
	npf_tableset_sysinit();
	npf_nat_sysinit();
	npf_portmap_sysinit();
	return npf_worker_sysinit(nworkers);
}

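/*
 * npfk_sysfini: tear down the global subsystems in the reverse order
 * of their initialisation.
 */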
__dso_public void
npfk_sysfini(void)
{

	npf_worker_sysfini();
	npf_portmap_sysfini();
	npf_nat_sysfini();
	npf_tableset_sysfini();
	npf_bpf_sysfini();
}

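/*
 * npfk_create: construct a new NPF instance using the caller-supplied
 * mbuf and interface operations.  Unless NPF_NO_GC is set, the new
 * instance is enlisted with the worker (garbage collection) threads.
 *
 * A minimal standalone usage sketch (my_mbufops, my_ifops and my_arg
 * are placeholders the caller would provide):
 *
 *	npfk_sysinit(nworkers);
 *	npf_t *npf = npfk_create(0, &my_mbufops, &my_ifops, my_arg);
 *	...
 *	npfk_destroy(npf);
 *	npfk_sysfini();
 */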
__dso_public npf_t *
npfk_create(int flags, const npf_mbufops_t *mbufops,
    const npf_ifops_t *ifops, void *arg)
{
	npf_t *npf;

	npf = kmem_zalloc(sizeof(npf_t), KM_SLEEP);
	npf->ebr = npf_ebr_create();
	npf->stats_percpu = percpu_alloc(NPF_STATS_SIZE);
	npf->mbufops = mbufops;
	npf->arg = arg;

	npf_param_init(npf);
	npf_state_sysinit(npf);
	npf_ifmap_init(npf, ifops);
	npf_conn_init(npf);
	npf_portmap_init(npf);
	npf_alg_init(npf);
	npf_ext_init(npf);

	/* Load an empty configuration. */
	npf_config_init(npf);

	if ((flags & NPF_NO_GC) == 0) {
		npf_worker_enlist(npf);
	}
	return npf;
}

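/*
 * npfk_destroy: discharge the instance from the worker, destroy the
 * current configuration and tear down all per-instance subsystems
 * before freeing the instance itself.
 */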
__dso_public void
npfk_destroy(npf_t *npf)
{
	npf_worker_discharge(npf);

	/*
	 * Destroy the current configuration.  Note: at this point all
	 * handlers must be deactivated; we will drain any processing.
	 */
	npf_config_fini(npf);

	/* Finally, safe to destroy the subsystems. */
	npf_ext_fini(npf);
	npf_alg_fini(npf);
	npf_portmap_fini(npf);
	npf_conn_fini(npf);
	npf_ifmap_fini(npf);
	npf_state_sysfini(npf);
	npf_param_fini(npf);

	npf_ebr_destroy(npf->ebr);
	percpu_free(npf->stats_percpu, NPF_STATS_SIZE);
	kmem_free(npf, sizeof(npf_t));
}

/*
 * npfk_load: (re)load the configuration.
 *
 * => Will not modify the configuration reference.
 */
__dso_public int
npfk_load(npf_t *npf, const void *config_ref, npf_error_t *err)
{
	const nvlist_t *req = (const nvlist_t *)config_ref;
	nvlist_t *resp;
	int error;

	resp = nvlist_create(0);
	error = npfctl_run_op(npf, IOC_NPF_LOAD, req, resp);
	nvlist_destroy(resp);

	return error;
}

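/*
 * npfk_gc: run one pass of the connection worker (connection garbage
 * collection).
 */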
__dso_public void
npfk_gc(npf_t *npf)
{
	npf_conn_worker(npf);
}

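/*
 * npfk_thread_register/npfk_thread_unregister: register or unregister
 * the calling thread with the instance's EBR (epoch-based reclamation)
 * state; unregistering first waits for a full EBR synchronisation.
 */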
__dso_public void
npfk_thread_register(npf_t *npf)
{
	npf_ebr_register(npf->ebr);
}

__dso_public void
npfk_thread_unregister(npf_t *npf)
{
	npf_ebr_full_sync(npf->ebr);
	npf_ebr_unregister(npf->ebr);
}

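/*
 * npfk_getarg: return the opaque argument passed to npfk_create().
 */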
__dso_public void *
npfk_getarg(npf_t *npf)
{
	return npf->arg;
}

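/*
 * Accessors for the kernel-wide NPF context.
 */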
void
npf_setkernctx(npf_t *npf)
{
	npf_kernel_ctx = npf;
}

npf_t *
npf_getkernctx(void)
{
	return npf_kernel_ctx;
}

/*
 * NPF statistics interface.
 */

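/*
 * npf_stats_inc/npf_stats_dec: adjust the given counter on the current
 * CPU; the per-CPU values are summed when exported via npfk_stats().
 */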
void
npf_stats_inc(npf_t *npf, npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf->stats_percpu);
	stats[st]++;
	percpu_putref(npf->stats_percpu);
}

void
npf_stats_dec(npf_t *npf, npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf->stats_percpu);
	stats[st]--;
	percpu_putref(npf->stats_percpu);
}

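/*
 * Cross-call callbacks: accumulate or clear the per-CPU statistics
 * of each CPU.
 */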
static void
npf_stats_collect(void *mem, void *arg, struct cpu_info *ci)
{
	uint64_t *percpu_stats = mem, *full_stats = arg;

	for (unsigned i = 0; i < NPF_STATS_COUNT; i++) {
		full_stats[i] += percpu_stats[i];
	}
}

static void
npf_stats_clear_cb(void *mem, void *arg, struct cpu_info *ci)
{
	uint64_t *percpu_stats = mem;

	for (unsigned i = 0; i < NPF_STATS_COUNT; i++) {
		percpu_stats[i] = 0;
	}
}

/*
 * npfk_stats: export the collected statistics.  The caller must
 * provide a buffer of NPF_STATS_SIZE bytes.
 */

__dso_public void
npfk_stats(npf_t *npf, uint64_t *buf)
{
	memset(buf, 0, NPF_STATS_SIZE);
	percpu_foreach_xcall(npf->stats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET),
	    npf_stats_collect, buf);
}

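/*
 * npfk_stats_clear: zero the statistics counters on all CPUs.
 */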
__dso_public void
npfk_stats_clear(npf_t *npf)
{
	percpu_foreach_xcall(npf->stats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET),
	    npf_stats_clear_cb, NULL);
}