/**
 * \file netback.c
 *
 * \brief Device driver supporting the vending of network access
 *        from this FreeBSD domain to other domains.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/bus.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/_inttypes.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <machine/xen/xenvar.h>

/*--------------------------- Compile-time Tunables --------------------------*/

/*---------------------------------- Macros ----------------------------------*/
/**
 * Custom malloc type for all driver allocations.
 */
static MALLOC_DEFINE(M_XENNETBACK, "xnb", "Xen Net Back Driver Data");

#define	XNB_SG		1	/* netback driver supports feature-sg */
#define	XNB_GSO_TCPV4	0	/* netback driver supports feature-gso-tcpv4 */
#define	XNB_RX_COPY	1	/* netback driver supports feature-rx-copy */
#define	XNB_RX_FLIP	0	/* netback driver does not support feature-rx-flip */

#undef XNB_DEBUG
#define	XNB_DEBUG /* hardcode on during development */

#ifdef XNB_DEBUG
#define	DPRINTF(fmt, args...) \
	printf("xnb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define	DPRINTF(fmt, args...) do {} while (0)
#endif

/* Default length for stack-allocated grant tables */
#define	GNTTAB_LEN	(64)

/* Features supported by all backends.  TSO and LRO can be negotiated */
#define	XNB_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define	NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define	NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

/**
 * Two argument version of the standard macro.  Second argument is a
 * tentative value of req_cons.
 */
#define	RING_HAS_UNCONSUMED_REQUESTS_2(_r, cons) ({			\
	unsigned int req = (_r)->sring->req_prod - cons;		\
	unsigned int rsp = RING_SIZE(_r) -				\
	    (cons - (_r)->rsp_prod_pvt);				\
	req < rsp ? req : rsp;						\
})

#define	virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)
#define	virt_to_offset(x) ((x) & (PAGE_SIZE - 1))

/**
 * Predefined array type of grant table copy descriptors.  Used to pass
 * around statically allocated memory structures.
 */
typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN];
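/**
 * Worked example (illustrative numbers) of the arithmetic in
 * RING_HAS_UNCONSUMED_REQUESTS_2 above: with RING_SIZE(_r) = 256,
 * req_prod = 260, cons = 252, and rsp_prod_pvt = 250,
 *
 *	req = 260 - 252 = 8		(requests produced, not yet consumed)
 *	rsp = 256 - (252 - 250) = 254	(response slots still free)
 *
 * so the macro evaluates to MIN(req, rsp) = 8: the caller may consume at
 * most eight more requests before it would either pass the producer or
 * overwrite response slots it has not yet published.
 */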
/*--------------------------- Forward Declarations ---------------------------*/
struct xnb_softc;
struct xnb_pkt;

static void	xnb_attach_failed(struct xnb_softc *xnb,
				  int err, const char *fmt, ...)
				  __printflike(3,4);
static int	xnb_shutdown(struct xnb_softc *xnb);
static int	create_netdev(device_t dev);
static int	xnb_detach(device_t dev);
static int	xnb_ifmedia_upd(struct ifnet *ifp);
static void	xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	xnb_intr(void *arg);
static int	xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend,
			 const struct mbuf *mbufc, gnttab_copy_table gnttab);
static int	xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend,
			 struct mbuf **mbufc, struct ifnet *ifnet,
			 gnttab_copy_table gnttab);
static int	xnb_ring2pkt(struct xnb_pkt *pkt,
			     const netif_tx_back_ring_t *tx_ring,
			     RING_IDX start);
static void	xnb_txpkt2rsp(const struct xnb_pkt *pkt,
			      netif_tx_back_ring_t *ring, int error);
static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
static int	xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
				 const struct mbuf *mbufc,
				 gnttab_copy_table gnttab,
				 const netif_tx_back_ring_t *txb,
				 domid_t otherend_id);
static void	xnb_update_mbufc(struct mbuf *mbufc,
				 const gnttab_copy_table gnttab, int n_entries);
static int	xnb_mbufc2pkt(const struct mbuf *mbufc,
			      struct xnb_pkt *pkt,
			      RING_IDX start, int space);
static int	xnb_rxpkt2gnttab(const struct xnb_pkt *pkt,
				 const struct mbuf *mbufc,
				 gnttab_copy_table gnttab,
				 const netif_rx_back_ring_t *rxb,
				 domid_t otherend_id);
static int	xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
			      const gnttab_copy_table gnttab, int n_entries,
			      netif_rx_back_ring_t *ring);
static void	xnb_stop(struct xnb_softc *);
static int	xnb_ioctl(struct ifnet *, u_long, caddr_t);
static void	xnb_start_locked(struct ifnet *);
static void	xnb_start(struct ifnet *);
static void	xnb_ifinit_locked(struct xnb_softc *);
static void	xnb_ifinit(void *);
#ifdef XNB_DEBUG
static int	xnb_unit_test_main(SYSCTL_HANDLER_ARGS);
static int	xnb_dump_rings(SYSCTL_HANDLER_ARGS);
#endif
#if defined(INET) || defined(INET6)
static void	xnb_add_mbuf_cksum(struct mbuf *mbufc);
#endif
/*------------------------------ Data Structures -----------------------------*/

/**
 * Representation of a xennet packet.  Simplified version of a packet as
 * stored in the Xen tx ring.  Applicable to both RX and TX packets.
 */
struct xnb_pkt {
	/**
	 * Array index of the first data-bearing (i.e., not extra info)
	 * entry for this packet.
	 */
	RING_IDX	car;

	/**
	 * Array index of the second data-bearing entry for this packet.
	 * Invalid if the packet has only one data-bearing entry.  If the
	 * packet has more than two data-bearing entries, then the second
	 * through the last will be sequential modulo the ring size.
	 */
	RING_IDX	cdr;

	/**
	 * Optional extra info.  Only valid if flags contains
	 * NETTXF_extra_info.  Note that extra.type will always be
	 * XEN_NETIF_EXTRA_TYPE_GSO.  Currently, no known netfront or
	 * netback driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_*.
	 */
	netif_extra_info_t extra;

	/** Size of entire packet in bytes. */
	uint16_t	size;

	/** The size of the first entry's data in bytes. */
	uint16_t	car_size;

	/**
	 * Either NETTXF_ or NETRXF_ flags.  Note that the flag values are
	 * not the same for TX and RX packets.
	 */
	uint16_t	flags;

	/**
	 * The number of valid data-bearing entries (either
	 * netif_tx_request's or netif_rx_response's) in the packet.  If
	 * this is 0, the entire packet is invalid.
	 */
	uint16_t	list_len;

	/** There was an error processing the packet. */
	uint8_t		error;
};
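/**
 * Illustrative example (indices are made up): a single-request packet
 * beginning at ring index 5 is described by car = 5, list_len = 1, and
 * size = car_size = the request's size; cdr is not valid in that case.
 * A two-request packet starting at index 5 with no extra info has
 * car = 5, cdr = 6, and list_len = 2, with car_size covering only the
 * first request's bytes.
 */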
/** xnb_pkt method: initialize it */
static inline void
xnb_pkt_initialize(struct xnb_pkt *pxnb)
{
	bzero(pxnb, sizeof(*pxnb));
}

/** xnb_pkt method: mark the packet as valid */
static inline void
xnb_pkt_validate(struct xnb_pkt *pxnb)
{
	pxnb->error = 0;
};

/** xnb_pkt method: mark the packet as invalid */
static inline void
xnb_pkt_invalidate(struct xnb_pkt *pxnb)
{
	pxnb->error = 1;
};

/** xnb_pkt method: check whether the packet is valid */
static inline int
xnb_pkt_is_valid(const struct xnb_pkt *pxnb)
{
	return (! pxnb->error);
}

#ifdef XNB_DEBUG
/** xnb_pkt method: print the packet's contents in human-readable format */
static void __unused
xnb_dump_pkt(const struct xnb_pkt *pkt) {
	if (pkt == NULL) {
		DPRINTF("Was passed a null pointer.\n");
		return;
	}
	DPRINTF("pkt address= %p\n", pkt);
	DPRINTF("pkt->size=%d\n", pkt->size);
	DPRINTF("pkt->car_size=%d\n", pkt->car_size);
	DPRINTF("pkt->flags=0x%04x\n", pkt->flags);
	DPRINTF("pkt->list_len=%d\n", pkt->list_len);
	/* DPRINTF("pkt->extra");  TODO */
	DPRINTF("pkt->car=%d\n", pkt->car);
	DPRINTF("pkt->cdr=%d\n", pkt->cdr);
	DPRINTF("pkt->error=%d\n", pkt->error);
}
#endif /* XNB_DEBUG */

static void
xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq)
{
	if (txreq != NULL) {
		DPRINTF("netif_tx_request index =%u\n", idx);
		DPRINTF("netif_tx_request.gref  =%u\n", txreq->gref);
		DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset);
		DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags);
		DPRINTF("netif_tx_request.id    =%hu\n", txreq->id);
		DPRINTF("netif_tx_request.size  =%hu\n", txreq->size);
	}
}

/**
 * \brief Configuration data for a shared memory request ring
 *        used to communicate with the front-end client of this
 *        driver.
 */
struct xnb_ring_config {
	/**
	 * Runtime structures for ring access.  Unfortunately, TX and RX
	 * rings use different data structures, and that cannot be changed
	 * since it is part of the interdomain protocol.
	 */
	union {
		netif_rx_back_ring_t	rx_ring;
		netif_tx_back_ring_t	tx_ring;
	} back_ring;

	/**
	 * The device bus address returned by the hypervisor when
	 * mapping the ring and required to unmap it when a connection
	 * is torn down.
	 */
	uint64_t	bus_addr;

	/** The pseudo-physical address where ring memory is mapped. */
	uint64_t	gnt_addr;

	/** KVA address where ring memory is mapped. */
	vm_offset_t	va;

	/**
	 * Grant table handles, one per-ring page, returned by the
	 * hypervisor upon mapping of the ring and required to
	 * unmap it when a connection is torn down.
	 */
	grant_handle_t	handle;

	/** The number of ring pages mapped for the current connection. */
	unsigned	ring_pages;

	/**
	 * The grant references, one per-ring page, supplied by the
	 * front-end, allowing us to reference the ring pages in the
	 * front-end's domain and to map these pages into our own domain.
	 */
	grant_ref_t	ring_ref;
};
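/**
 * Minimal sketch (assumptions noted) of how a mapped ring page is wrapped
 * in the back_ring union above.  It assumes the shared page has already
 * been grant-mapped at cfg->va; the variable names are for illustration
 * only.
 *
 * \code
 *	struct xnb_ring_config *cfg;	// one ring's config
 *	netif_tx_sring_t *sring;
 *
 *	sring = (netif_tx_sring_t *)cfg->va;
 *	BACK_RING_INIT(&cfg->back_ring.tx_ring, sring,
 *	    cfg->ring_pages * PAGE_SIZE);
 * \endcode
 *
 * BACK_RING_INIT is the standard macro from <xen/interface/io/ring.h>;
 * the RX ring would be initialized the same way through rx_ring.
 */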
/**
 * Per-instance connection state flags.
 */
typedef enum
{
	/** Communication with the front-end has been established. */
	XNBF_RING_CONNECTED    = 0x01,

	/**
	 * Front-end requests exist in the ring and are waiting for
	 * xnb_xen_req objects to free up.
	 */
	XNBF_RESOURCE_SHORTAGE = 0x02,

	/** Connection teardown has started. */
	XNBF_SHUTDOWN          = 0x04,

	/** A thread is already performing shutdown processing. */
	XNBF_IN_SHUTDOWN       = 0x08
} xnb_flag_t;

/**
 * Types of rings.  Used for array indices and to identify a ring's
 * control data structure type.
 */
typedef enum {
	XNB_RING_TYPE_TX = 0,	/* ID of TX rings, used for array indices */
	XNB_RING_TYPE_RX = 1,	/* ID of RX rings, used for array indices */
	XNB_NUM_RING_TYPES
} xnb_ring_type_t;

/**
 * Per-instance configuration data.
 */
struct xnb_softc {
	/** NewBus device corresponding to this instance. */
	device_t		dev;

	/* Media related fields */

	/** Generic network media state */
	struct ifmedia		sc_media;

	/** Media carrier info */
	struct ifnet		*xnb_ifp;

	/** Our own private carrier state */
	unsigned		carrier;

	/** Device MAC address */
	uint8_t			mac[ETHER_ADDR_LEN];

	/* Xen related fields */

	/**
	 * \brief The netif protocol ABI in effect.
	 *
	 * There are situations where the back and front ends can
	 * have a different, native ABI (e.g. Intel x86_64 and
	 * 32-bit x86 domains on the same machine).  The back-end
	 * always accommodates the front-end's native ABI.  That
	 * value is pulled from the XenStore and recorded here.
	 */
	int			abi;

	/**
	 * Name of the bridge to which this VIF is connected, if any.
	 * This field is dynamically allocated by xenbus and must be
	 * free()ed when no longer needed.
	 */
	char			*bridge;

	/** The interrupt-driven event channel used to signal ring events. */
	evtchn_port_t		evtchn;

	/** Xen device handle. */
	long			handle;

	/** Handle to the communication ring event channel. */
	xen_intr_handle_t	xen_intr_handle;

	/**
	 * \brief Cached value of the front-end's domain id.
	 *
	 * This value is used once for each mapped page in
	 * a transaction.  We cache it to avoid incurring the
	 * cost of an ivar access every time this is needed.
	 */
	domid_t			otherend_id;

	/**
	 * Undocumented frontend feature.  Has something to do with
	 * scatter/gather IO.
	 */
	uint8_t			can_sg;
	/** Undocumented frontend feature */
	uint8_t			gso;
	/** Undocumented frontend feature */
	uint8_t			gso_prefix;
	/** Can checksum TCP/UDP over IPv4 */
	uint8_t			ip_csum;

	/* Implementation related fields */
	/**
	 * Preallocated grant table copy descriptor for RX operations.
	 * Access must be protected by rx_lock.
	 */
	gnttab_copy_table	rx_gnttab;

	/**
	 * Preallocated grant table copy descriptor for TX operations.
	 * Access must be protected by tx_lock.
	 */
	gnttab_copy_table	tx_gnttab;
	return (0);
}

/**
 * Collect information from the XenStore related to our device and its
 * frontend.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_collect_xenstore_info(struct xnb_softc *xnb)
{
	/**
	 * \todo Linux collects the following info.  We should collect most
	 * of this, too:
	 * "feature-rx-notify"
	 */
	const char *otherend_path;
	const char *our_path;
	int err;
	unsigned int rx_copy, bridge_len;
	uint8_t no_csum_offload;

	otherend_path = xenbus_get_otherend_path(xnb->dev);
	our_path = xenbus_get_node(xnb->dev);

	/* Collect the critical communication parameters */
	err = xs_gather(XST_NIL, otherend_path,
	    "tx-ring-ref", "%l" PRIu32,
	    &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref,
	    "rx-ring-ref", "%l" PRIu32,
	    &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref,
	    "event-channel", "%" PRIu32, &xnb->evtchn,
	    NULL);
	if (err != 0) {
		xenbus_dev_fatal(xnb->dev, err,
				 "Unable to retrieve ring information from "
				 "frontend %s.  Unable to connect.",
				 otherend_path);
		return (err);
	}

	/* Collect the handle from xenstore */
	err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle);
	if (err != 0) {
		xenbus_dev_fatal(xnb->dev, err,
				 "Error reading handle from frontend %s.  "
				 "Unable to connect.", otherend_path);
	}

	/*
	 * Collect the bridge name, if any.  We do not need bridge_len; we
	 * just throw it away.
	 */
	err = xs_read(XST_NIL, our_path, "bridge", &bridge_len,
		      (void **)&xnb->bridge);
	if (err != 0)
		xnb->bridge = NULL;

	/*
	 * Does the frontend request that we use rx copy?  If not, return an
	 * error because this driver only supports rx copy.
	 */
	err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL,
		       "%" PRIu32, &rx_copy);
	if (err == ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy",
				 otherend_path);
		return (err);
	}
	/**
	 * \todo: figure out the exact meaning of this feature, and when
	 * the frontend will set it to true.  It should be set to true
	 * at some point
	 */
/*	if (!rx_copy)*/
/*		return EOPNOTSUPP;*/

	/** \todo Collect the rx notify feature */

	/* Collect the feature-sg. */
	if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL,
		     "%hhu", &xnb->can_sg) < 0)
		xnb->can_sg = 0;

	/* Collect remaining frontend features */
	if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL,
		     "%hhu", &xnb->gso) < 0)
		xnb->gso = 0;

	if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL,
		     "%hhu", &xnb->gso_prefix) < 0)
		xnb->gso_prefix = 0;

	if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL,
		     "%hhu", &no_csum_offload) < 0)
		no_csum_offload = 0;
	xnb->ip_csum = (no_csum_offload == 0);

	return (0);
}
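/**
 * For reference, the XenStore nodes consulted above form a layout roughly
 * like the following (paths and values are illustrative only; the exact
 * prefixes depend on the toolstack, and <domid> and <handle> vary per
 * connection):
 *
 *	backend/vif/<domid>/<handle>/handle	= "0"
 *	backend/vif/<domid>/<handle>/bridge	= "bridge0"
 *	frontend path:
 *	  .../tx-ring-ref			= "8"
 *	  .../rx-ring-ref			= "9"
 *	  .../event-channel			= "4"
 *	  .../request-rx-copy			= "1"
 *	  .../feature-sg			= "1"
 *	  .../feature-gso-tcpv4			= "1"
 *	  .../feature-no-csum-offload		= "0"
 */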
/**
 * Supply information about the physical device to the frontend
 * via XenBus.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_publish_backend_info(struct xnb_softc *xnb)
{
	struct xs_transaction xst;
	const char *our_path;
	int error;

	our_path = xenbus_get_node(xnb->dev);

	do {
		error = xs_transaction_start(&xst);
		if (error != 0) {
			xenbus_dev_fatal(xnb->dev, error,
					 "Error publishing backend info "
					 "(start transaction)");
			break;
		}

		error = xs_printf(xst, our_path, "feature-sg",
				  "%d", XNB_SG);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-gso-tcpv4",
				  "%d", XNB_GSO_TCPV4);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-rx-copy",
				  "%d", XNB_RX_COPY);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-rx-flip",
				  "%d", XNB_RX_FLIP);
		if (error != 0)
			break;

		error = xs_transaction_end(xst, 0);
		if (error != 0 && error != EAGAIN) {
			xenbus_dev_fatal(xnb->dev, error, "ending transaction");
			break;
		}

	} while (error == EAGAIN);

	return (error);
}

/**
 * Connect to our netfront peer now that it has completed publishing
 * its configuration into the XenStore.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_connect(struct xnb_softc *xnb)
{
	int error;

	if (xenbus_get_state(xnb->dev) == XenbusStateConnected)
		return;

	if (xnb_collect_xenstore_info(xnb) != 0)
		return;

	xnb->flags &= ~XNBF_SHUTDOWN;

	/* Read front end configuration. */

	/* Allocate resources whose size depends on front-end configuration. */
	error = xnb_alloc_communication_mem(xnb);
	if (error != 0) {
		xenbus_dev_fatal(xnb->dev, error,
				 "Unable to allocate communication memory");
		return;
	}

	/*
	 * Connect communication channel.
	 */
	error = xnb_connect_comms(xnb);
	if (error != 0) {
		/* Specific errors are reported by xnb_connect_comms(). */
		return;
	}
	xnb->carrier = 1;

	/* Ready for I/O. */
	xenbus_set_state(xnb->dev, XenbusStateConnected);
}
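/**
 * The overall connection handshake, as implemented in this file, runs
 * roughly as follows (a summary of xnb_attach(), xnb_frontend_changed(),
 * and xnb_connect()):
 *
 *   1. xnb_attach() publishes backend features and sets our XenBus state
 *      to XenbusStateInitWait.
 *   2. The frontend publishes its ring references and event channel, then
 *      moves to XenbusStateInitialised or XenbusStateConnected.
 *   3. xnb_frontend_changed() observes that transition and calls
 *      xnb_connect(), which maps the rings, binds the event channel, and
 *      sets our state to XenbusStateConnected.
 */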
/*-------------------------- Device Teardown Support -------------------------*/
/**
 * Perform device shutdown functions.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * Mark this instance as shutting down, wait for any active requests
 * to drain, disconnect from the front-end, and notify any waiters (e.g.
 * a thread invoking our detach method) that detach can now proceed.
 */
static int
xnb_shutdown(struct xnb_softc *xnb)
{
	/*
	 * Due to the need to drop our mutex during some
	 * xenbus operations, it is possible for two threads
	 * to attempt to close out shutdown processing at
	 * the same time.  Tell the caller that hits this
	 * race to try back later.
	 */
	if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0)
		return (EAGAIN);

	xnb->flags |= XNBF_SHUTDOWN;

	xnb->flags |= XNBF_IN_SHUTDOWN;

	mtx_unlock(&xnb->sc_lock);
	/* Free the network interface */
	xnb->carrier = 0;
	if (xnb->xnb_ifp != NULL) {
		ether_ifdetach(xnb->xnb_ifp);
		if_free(xnb->xnb_ifp);
		xnb->xnb_ifp = NULL;
	}
	mtx_lock(&xnb->sc_lock);

	xnb_disconnect(xnb);

	mtx_unlock(&xnb->sc_lock);
	if (xenbus_get_state(xnb->dev) < XenbusStateClosing)
		xenbus_set_state(xnb->dev, XenbusStateClosing);
	mtx_lock(&xnb->sc_lock);

	xnb->flags &= ~XNBF_IN_SHUTDOWN;

	/* Indicate to xnb_detach() that it is safe to proceed. */
	wakeup(xnb);

	return (0);
}

/**
 * Report an attach time error to the console and Xen, and clean up
 * this instance by forcing immediate detach processing.
 *
 * \param xnb  Per-instance xnb configuration structure.
 * \param err  Errno describing the error.
 * \param fmt  Printf style format and arguments
 */
static void
xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...)
{
	va_list ap;
	va_list ap_hotplug;

	va_start(ap, fmt);
	va_copy(ap_hotplug, ap);
	xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev),
		   "hotplug-error", fmt, ap_hotplug);
	va_end(ap_hotplug);
	xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
		  "hotplug-status", "error");

	xenbus_dev_vfatal(xnb->dev, err, fmt, ap);
	va_end(ap);

	xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
		  "online", "0");
	xnb_detach(xnb->dev);
}

/*---------------------------- NewBus Entrypoints ----------------------------*/
/**
 * Inspect a XenBus device and claim it if it is of the appropriate type.
 *
 * \param dev  NewBus device object representing a candidate XenBus device.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_probe(device_t dev)
{
	if (!strcmp(xenbus_get_type(dev), "vif")) {
		DPRINTF("Claiming device %d, %s\n", device_get_unit(dev),
		    devclass_get_name(device_get_devclass(dev)));
		device_set_desc(dev, "Backend Virtual Network Device");
		device_quiet(dev);
		return (0);
	}
	return (ENXIO);
}

/**
 * Setup sysctl variables to control various Network Back parameters.
 *
 * \param xnb  Xen Net Back softc.
 */
static void
xnb_setup_sysctl(struct xnb_softc *xnb)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;

	sysctl_ctx = device_get_sysctl_ctx(xnb->dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xnb->dev);
	if (sysctl_tree == NULL)
		return;

#ifdef XNB_DEBUG
	SYSCTL_ADD_PROC(sysctl_ctx,
			SYSCTL_CHILDREN(sysctl_tree),
			OID_AUTO,
			"unit_test_results",
			CTLTYPE_STRING | CTLFLAG_RD,
			xnb,
			0,
			xnb_unit_test_main,
			"A",
			"Results of builtin unit tests");

	SYSCTL_ADD_PROC(sysctl_ctx,
			SYSCTL_CHILDREN(sysctl_tree),
			OID_AUTO,
			"dump_rings",
			CTLTYPE_STRING | CTLFLAG_RD,
			xnb,
			0,
			xnb_dump_rings,
			"A",
			"Xennet Back Rings");
#endif /* XNB_DEBUG */
}
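/**
 * When XNB_DEBUG is enabled, the OIDs registered above appear under the
 * device's sysctl tree and can be read from userland.  Assuming the
 * instance attaches as unit 0 (the exact OID prefix depends on how the
 * device enumerates), something like:
 *
 *	sysctl dev.xnb.0.unit_test_results
 *	sysctl dev.xnb.0.dump_rings
 */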
/**
 * Create a network device.
 *
 * \param dev  NewBus device representing this virtual interface.
 */
int
create_netdev(device_t dev)
{
	struct ifnet *ifp;
	struct xnb_softc *xnb;
	int err = 0;
	uint32_t handle;

	xnb = device_get_softc(dev);
	mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF);
	mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF);
	mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF);

	xnb->dev = dev;

	ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts);
	ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL);

	/*
	 * Set the MAC address to a dummy value (00:00:00:00:00:00).  If the
	 * MAC address of the host-facing interface were set to the same as
	 * the guest-facing one (the value found in xenstore), the bridge
	 * would stop delivering packets to us because it would see that the
	 * destination address of the packet is the same as the interface,
	 * and so the bridge would expect the packet to have already been
	 * delivered locally (and just drop it).
	 */
	bzero(&xnb->mac[0], sizeof(xnb->mac));

	/*
	 * The interface will be named using the following nomenclature:
	 *
	 *	xnb<domid>.<handle>
	 *
	 * where handle is the order of the interface referred to the guest.
	 */
	err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL,
		       "%" PRIu32, &handle);
	if (err != 0)
		return (err);
	snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32,
	    xenbus_get_otherend_id(dev), handle);

	if (err == 0) {
		/* Set up ifnet structure */
		ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
		ifp->if_softc = xnb;
		if_initname(ifp, xnb->if_name, IF_DUNIT_NONE);
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = xnb_ioctl;
		ifp->if_output = ether_output;
		ifp->if_start = xnb_start;
#ifdef notyet
		ifp->if_watchdog = xnb_watchdog;
#endif
		ifp->if_init = xnb_ifinit;
		ifp->if_mtu = ETHERMTU;
		ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;

		ifp->if_hwassist = XNB_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_capenable = IFCAP_HWCSUM;

		ether_ifattach(ifp, xnb->mac);
		xnb->carrier = 0;
	}

	return (err);
}
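/**
 * Naming example (illustrative values): a VIF whose frontend lives in
 * domain 5 and whose xenstore "handle" node reads 0 attaches as "xnb5.0";
 * a second interface for the same guest (handle 1) would be "xnb5.1".
 */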
/**
 * Attach to a XenBus device that has been claimed by our probe routine.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_attach(device_t dev)
{
	struct xnb_softc *xnb;
	int error;
	xnb_ring_type_t i;

	error = create_netdev(dev);
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "creating netdev");
		return (error);
	}

	DPRINTF("Attaching to %s\n", xenbus_get_node(dev));

	/*
	 * Basic initialization.
	 * After this block it is safe to call xnb_detach()
	 * to clean up any allocated data for this instance.
	 */
	xnb = device_get_softc(dev);
	xnb->otherend_id = xenbus_get_otherend_id(dev);
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		xnb->ring_configs[i].ring_pages = 1;
	}

	/*
	 * Setup sysctl variables.
	 */
	xnb_setup_sysctl(xnb);

	/* Update hot-plug status to satisfy xend. */
	error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
			  "hotplug-status", "connected");
	if (error != 0) {
		xnb_attach_failed(xnb, error, "writing %s/hotplug-status",
				  xenbus_get_node(xnb->dev));
		return (error);
	}

	if ((error = xnb_publish_backend_info(xnb)) != 0) {
		/*
		 * If we can't publish our data, we cannot participate
		 * in this connection, and waiting for a front-end state
		 * change will not help the situation.
		 */
		xnb_attach_failed(xnb, error,
		    "Publishing backend status for %s",
		    xenbus_get_node(xnb->dev));
		return (error);
	}

	/* Tell the front end that we are ready to connect. */
	xenbus_set_state(dev, XenbusStateInitWait);

	return (0);
}

/**
 * Detach from a net back device instance.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 *
 * \note A net back device may be detached at any time in its life-cycle,
 *       including part way through the attach process.  For this reason,
 *       initialization order and the initialization state checks in this
 *       routine must be carefully coupled so that attach time failures
 *       are gracefully handled.
 */
static int
xnb_detach(device_t dev)
{
	struct xnb_softc *xnb;

	DPRINTF("\n");

	xnb = device_get_softc(dev);
	mtx_lock(&xnb->sc_lock);
	while (xnb_shutdown(xnb) == EAGAIN) {
		msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0,
		       "xnb_shutdown", 0);
	}
	mtx_unlock(&xnb->sc_lock);
	DPRINTF("\n");

	mtx_destroy(&xnb->tx_lock);
	mtx_destroy(&xnb->rx_lock);
	mtx_destroy(&xnb->sc_lock);
	return (0);
}

/**
 * Prepare this net back device for suspension of this VM.
 *
 * \param dev  NewBus device object representing this Xen net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_suspend(device_t dev)
{
	return (0);
}

/**
 * Perform any processing required to recover from a suspended state.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_resume(device_t dev)
{
	return (0);
}
/**
 * Handle state changes expressed via the XenStore by our front-end peer.
 *
 * \param dev             NewBus device object representing this Xen
 *                        Net Back instance.
 * \param frontend_state  The new state of the front-end.
 */
static void
xnb_frontend_changed(device_t dev, XenbusState frontend_state)
{
	struct xnb_softc *xnb;

	xnb = device_get_softc(dev);

	DPRINTF("frontend_state=%s, xnb_state=%s\n",
	        xenbus_strstate(frontend_state),
	        xenbus_strstate(xenbus_get_state(xnb->dev)));

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;
	case XenbusStateInitialised:
	case XenbusStateConnected:
		xnb_connect(xnb);
		break;
	case XenbusStateClosing:
	case XenbusStateClosed:
		mtx_lock(&xnb->sc_lock);
		xnb_shutdown(xnb);
		mtx_unlock(&xnb->sc_lock);
		if (frontend_state == XenbusStateClosed)
			xenbus_set_state(xnb->dev, XenbusStateClosed);
		break;
	default:
		xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/*---------------------------- Request Processing ----------------------------*/
/**
 * Interrupt handler bound to the shared ring's event channel.  Entry point
 * for the xennet transmit path in netback: transfers packets from the Xen
 * ring to the host's generic networking stack.
 *
 * \param arg  Callback argument registered during event channel
 *             binding - the xnb_softc for this instance.
 */
static void
xnb_intr(void *arg)
{
	struct xnb_softc *xnb;
	struct ifnet *ifp;
	netif_tx_back_ring_t *txb;
	RING_IDX req_prod_local;

	xnb = (struct xnb_softc *)arg;
	ifp = xnb->xnb_ifp;
	txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;

	mtx_lock(&xnb->tx_lock);
	do {
		int notify;
		req_prod_local = txb->sring->req_prod;
		xen_rmb();

		for (;;) {
			struct mbuf *mbufc;
			int err;

			err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp,
			    xnb->tx_gnttab);
			if (err || (mbufc == NULL))
				break;

			/* Send the packet to the generic network stack */
			(*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
		if (notify != 0)
			xen_intr_signal(xnb->xen_intr_handle);

		txb->sring->req_event = txb->req_cons + 1;
		xen_mb();
	} while (txb->sring->req_prod != req_prod_local);
	mtx_unlock(&xnb->tx_lock);

	xnb_start(ifp);
}
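/**
 * A note on the loop structure of xnb_intr() above: after draining the
 * ring it re-arms the event pointer (req_event = req_cons + 1), issues a
 * memory barrier, and then re-reads req_prod.  If the frontend produced
 * more requests between the drain and the re-arm, the outer do/while
 * catches them on the next pass instead of waiting for an event that the
 * frontend, seeing req_event already passed, might never send.  This is
 * the same final-check idiom that the standard ring macro
 * RING_FINAL_CHECK_FOR_REQUESTS() encapsulates.
 */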
/**
 * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
 * Will read exactly 0 or 1 packets from the ring; never a partial packet.
 *
 * \param[out] pkt      The returned packet.  If there is an error building
 *                      the packet, pkt.list_len will be set to 0.
 * \param[in]  tx_ring  Pointer to the Ring that is the input to this
 *                      function
 * \param[in]  start    The ring index of the first potential request
 *
 * \return  The number of requests consumed to build this packet
 */
static int
xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
	     RING_IDX start)
{
	/*
	 * Outline:
	 * 1) Initialize pkt
	 * 2) Read the first request of the packet
	 * 3) Read the extras
	 * 4) Set cdr
	 * 5) Loop on the remainder of the packet
	 * 6) Finalize pkt (stuff like car_size and list_len)
	 */
	int idx = start;
	int discard = 0;	/* whether to discard the packet */
	int more_data = 0;	/* there are more requests past the last one */
	uint16_t cdr_size = 0;	/* accumulated size of requests 2 through n */

	xnb_pkt_initialize(pkt);

	/* Read the first request */
	if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
		pkt->size = tx->size;
		pkt->flags = tx->flags & ~NETTXF_more_data;
		more_data = tx->flags & NETTXF_more_data;
		pkt->list_len++;
		pkt->car = idx;
		idx++;
	}

	/* Read the extra info */
	if ((pkt->flags & NETTXF_extra_info) &&
	    RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_extra_info_t *ext =
		    (netif_extra_info_t *)RING_GET_REQUEST(tx_ring, idx);
		pkt->extra.type = ext->type;
		switch (pkt->extra.type) {
		case XEN_NETIF_EXTRA_TYPE_GSO:
			pkt->extra.u.gso = ext->u.gso;
			break;
		default:
			/*
			 * The reference Linux netfront driver will
			 * never set any other extra.type.  So we don't
			 * know what to do with it.  Let's print an
			 * error, then consume and discard the packet
			 */
			printf("xnb(%s:%d): Unknown extra info type %d."
			       "  Discarding packet\n",
			       __func__, __LINE__, pkt->extra.type);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring,
			    start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring,
			    idx));
			discard = 1;
			break;
		}

		pkt->extra.flags = ext->flags;
		if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) {
			/*
			 * The reference linux netfront driver never sets this
			 * flag (nor does any other known netfront).  So we
			 * will discard the packet.
			 */
			printf("xnb(%s:%d): Request sets "
			    "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle "
			    "that\n", __func__, __LINE__);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
			discard = 1;
		}

		idx++;
	}

	/* Set cdr.  If there is not more data, cdr is invalid */
	pkt->cdr = idx;

	/* Loop on remainder of packet */
	while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
		pkt->list_len++;
		cdr_size += tx->size;
		if (tx->flags & ~NETTXF_more_data) {
			/* There should be no other flags set at this point */
			printf("xnb(%s:%d): Request sets unknown flags %d "
			    "after the 1st request in the packet.\n",
			    __func__, __LINE__, tx->flags);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
		}

		more_data = tx->flags & NETTXF_more_data;
		idx++;
	}

	/* Finalize packet */
	if (more_data != 0) {
		/* The ring ran out of requests before finishing the packet */
		xnb_pkt_invalidate(pkt);
		idx = start;	/* tell caller that we consumed no requests */
	} else {
		/* Calculate car_size */
		pkt->car_size = pkt->size - cdr_size;
	}
	if (discard != 0) {
		xnb_pkt_invalidate(pkt);
	}

	return (idx - start);
}
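/**
 * Worked example of xnb_ring2pkt() above and xnb_txpkt2rsp() below
 * (illustrative ring indices): suppose the frontend queued a GSO packet
 * as three ring entries starting at index 10: a first data request with
 * NETTXF_extra_info | NETTXF_more_data set, a netif_extra_info_t of type
 * XEN_NETIF_EXTRA_TYPE_GSO, and a final data request with no flags.
 * xnb_ring2pkt() returns 3 and fills in pkt with car = 10, cdr = 12 (the
 * extra-info slot at 11 is skipped over), list_len = 2 data-bearing
 * entries, and car_size = pkt->size minus the last request's size.
 * Responding to the same packet, xnb_txpkt2rsp() writes a normal response
 * (copying the first request's id) for slot 10, a NETIF_RSP_NULL
 * placeholder for the extra-info slot, and a second normal response for
 * the last slot, advancing rsp_prod_pvt by three without pushing the
 * shared ring.
 */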
/**
 * Respond to all the requests that constituted pkt.  Builds the responses
 * and writes them to the ring, but doesn't push them to the shared ring.
 *
 * \param[in]  pkt    the packet that needs a response
 * \param[in]  error  true if there was an error handling the packet, such
 *                    as in the hypervisor copy op or mbuf allocation
 * \param[out] ring   Responses go here
 */
static void
xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
	      int error)
{
	/*
	 * Outline:
	 * 1) Respond to the first request
	 * 2) Respond to the extra info request
	 * Loop through every remaining request in the packet, generating
	 * responses that copy those requests' ids and set the status
	 * appropriately.
	 */
	netif_tx_request_t *tx;
	netif_tx_response_t *rsp;
	int i;
	uint16_t status;

	status = (xnb_pkt_is_valid(pkt) == 0) || error ?
	    NETIF_RSP_ERROR : NETIF_RSP_OKAY;
	KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car),
	    ("Cannot respond to ring requests out of order"));

	if (pkt->list_len >= 1) {
		uint16_t id;
		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;

		if (pkt->flags & NETRXF_extra_info) {
			rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
			rsp->status = NETIF_RSP_NULL;
			ring->rsp_prod_pvt++;
		}
	}

	for (i = 0; i < pkt->list_len - 1; i++) {
		uint16_t id;
		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;
	}
}

/**
 * Create an mbuf chain to represent a packet.  Initializes all of the
 * headers in the mbuf chain, but does not copy the data.  The returned
 * chain must be free()'d when no longer needed.
 *
 * \param[in] pkt  A packet to model the mbuf chain after
 * \param[in] ifp  Interface to record as the mbuf chain's receive interface
 *
 * \return  A newly allocated mbuf chain, possibly with clusters attached.
 *          NULL on failure
 */
static struct mbuf *
xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
{
	/**
	 * \todo consider using a memory pool for mbufs instead of
	 * reallocating them for every packet
	 */
	/** \todo handle extra data */
	struct mbuf *m;

	m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);

	if (m != NULL) {
		m->m_pkthdr.rcvif = ifp;
		if (pkt->flags & NETTXF_data_validated) {
			/*
			 * We lie to the host OS and always tell it that the
			 * checksums are ok, because the packet is unlikely to
			 * get corrupted going across domains.
			 */
			m->m_pkthdr.csum_flags = (
				CSUM_IP_CHECKED |
				CSUM_IP_VALID   |
				CSUM_DATA_VALID |
				CSUM_PSEUDO_HDR
				);
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	return (m);
}

/**
 * Build a gnttab_copy table that can be used to copy data from a pkt
 * to an mbufc.  Does not actually perform the copy.  Always uses gref's on
 * the packet side.
 *
 * \param[in]  pkt          pkt's associated requests form the src for
 *                          the copy operation
 * \param[in]  mbufc        mbufc's storage forms the dest for the copy
 *                          operation
 * \param[out] gnttab       Storage for the returned grant table
 * \param[in]  txb          Pointer to the backend ring structure
 * \param[in]  otherend_id  The domain ID of the other end of the copy
 *
 * \return  The number of gnttab entries filled
 */
static int
xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
		 gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
		 domid_t otherend_id)
{

	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into tx ring buffer */
	int r_ofs = 0;	/* offset of next data within tx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining = pkt->size;

	while (size_remaining > 0) {
		const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
		const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
		const size_t req_size =
		    r_idx == pkt->car ? pkt->car_size : txq->size;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].source.u.ref = txq->gref;
		gnttab[gnt_idx].source.domid = otherend_id;
		gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
		gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_source_gref;

		gnt_idx++;
		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next tx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return (gnt_idx);
}
/**
 * Check the status of the grant copy operations, and update the mbuf
 * chain's various non-data fields to reflect the data present.
 *
 * \param[in,out] mbufc      mbuf chain to update.  The chain must be valid
 *                           and of the correct length, and data should
 *                           already be present
 * \param[in]     gnttab     A grant table for a just completed copy op
 * \param[in]     n_entries  The number of valid entries in the grant table
 */
static void
xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
		 int n_entries)
{
	struct mbuf *mbuf = mbufc;
	int i;
	size_t total_size = 0;

	for (i = 0; i < n_entries; i++) {
		KASSERT(gnttab[i].status == GNTST_okay,
		    ("Some gnttab_copy entry had error status %hd\n",
		    gnttab[i].status));

		mbuf->m_len += gnttab[i].len;
		total_size += gnttab[i].len;
		if (M_TRAILINGSPACE(mbuf) <= 0) {
			mbuf = mbuf->m_next;
		}
	}
	mbufc->m_pkthdr.len = total_size;

#if defined(INET) || defined(INET6)
	xnb_add_mbuf_cksum(mbufc);
#endif
}
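/**
 * The TX dequeue pipeline, of which xnb_recv() below is the driver, ties
 * the preceding helpers together in this order:
 *
 *	xnb_ring2pkt()     - parse ring requests into a struct xnb_pkt
 *	xnb_pkt2mbufc()    - allocate an mbuf chain big enough for the data
 *	xnb_txpkt2gnttab() - describe the guest-to-host copies
 *	HYPERVISOR_grant_table_op(GNTTABOP_copy, ...) - perform the copies
 *	xnb_update_mbufc() - fix up mbuf lengths and checksum flags
 *	xnb_txpkt2rsp()    - queue responses for the consumed requests
 *
 * As a copy-table example (illustrative sizes): a valid 3000-byte packet
 * held in two tx requests (car_size = 1000, second request 2000 bytes)
 * copied into a single 4k mbuf cluster yields two gnttab entries; entry 0
 * copies 1000 bytes from the first request's gref into cluster offset 0,
 * and entry 1 copies 2000 bytes from the second request's gref into
 * cluster offset 1000.  Both copies happen in the single GNTTABOP_copy
 * hypercall issued by xnb_recv().
 */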
/**
 * Connect to our netfront peer now that it has completed publishing
 * its configuration into the XenStore.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_connect(struct xnb_softc *xnb)
{
	int error;

	if (xenbus_get_state(xnb->dev) == XenbusStateConnected)
		return;

	if (xnb_collect_xenstore_info(xnb) != 0)
		return;

	xnb->flags &= ~XNBF_SHUTDOWN;

	/* Read front end configuration. */

	/* Allocate resources whose size depends on front-end configuration. */
	error = xnb_alloc_communication_mem(xnb);
	if (error != 0) {
		xenbus_dev_fatal(xnb->dev, error,
		    "Unable to allocate communication memory");
		return;
	}

	/*
	 * Connect communication channel.
	 */
	error = xnb_connect_comms(xnb);
	if (error != 0) {
		/* Specific errors are reported by xnb_connect_comms(). */
		return;
	}
	xnb->carrier = 1;

	/* Ready for I/O. */
	xenbus_set_state(xnb->dev, XenbusStateConnected);
}

/*-------------------------- Device Teardown Support -------------------------*/
/**
 * Perform device shutdown functions.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * Mark this instance as shutting down, wait for any active requests
 * to drain, disconnect from the front-end, and notify any waiters (e.g.
 * a thread invoking our detach method) that detach can now proceed.
 */
static int
xnb_shutdown(struct xnb_softc *xnb)
{
	/*
	 * Due to the need to drop our mutex during some
	 * xenbus operations, it is possible for two threads
	 * to attempt to close out shutdown processing at
	 * the same time.  Tell the caller that hits this
	 * race to try back later.
	 */
	if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0)
		return (EAGAIN);

	xnb->flags |= XNBF_SHUTDOWN;

	xnb->flags |= XNBF_IN_SHUTDOWN;

	mtx_unlock(&xnb->sc_lock);
	/* Free the network interface */
	xnb->carrier = 0;
	if (xnb->xnb_ifp != NULL) {
		ether_ifdetach(xnb->xnb_ifp);
		if_free(xnb->xnb_ifp);
		xnb->xnb_ifp = NULL;
	}
	mtx_lock(&xnb->sc_lock);

	xnb_disconnect(xnb);

	mtx_unlock(&xnb->sc_lock);
	if (xenbus_get_state(xnb->dev) < XenbusStateClosing)
		xenbus_set_state(xnb->dev, XenbusStateClosing);
	mtx_lock(&xnb->sc_lock);

	xnb->flags &= ~XNBF_IN_SHUTDOWN;

	/* Indicate to xnb_detach() that it is safe to proceed. */
	wakeup(xnb);

	return (0);
}
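/*
 * Callers that must guarantee that shutdown has completed retry on
 * EAGAIN while holding sc_lock; this is the pattern xnb_detach() below
 * uses (sketch):
 *
 *	mtx_lock(&xnb->sc_lock);
 *	while (xnb_shutdown(xnb) == EAGAIN)
 *		msleep(xnb, &xnb->sc_lock, 0, "xnb_shutdown", 0);
 *	mtx_unlock(&xnb->sc_lock);
 */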
/**
 * Report an attach time error to the console and Xen, and cleanup
 * this instance by forcing immediate detach processing.
 *
 * \param xnb  Per-instance xnb configuration structure.
 * \param err  Errno describing the error.
 * \param fmt  Printf style format and arguments
 */
static void
xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...)
{
	va_list ap;
	va_list ap_hotplug;

	va_start(ap, fmt);
	va_copy(ap_hotplug, ap);
	xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev),
	    "hotplug-error", fmt, ap_hotplug);
	va_end(ap_hotplug);
	xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
	    "hotplug-status", "error");

	xenbus_dev_vfatal(xnb->dev, err, fmt, ap);
	va_end(ap);

	xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
	    "online", "0");
	xnb_detach(xnb->dev);
}

/*---------------------------- NewBus Entrypoints ----------------------------*/
/**
 * Inspect a XenBus device and claim it if it is of the appropriate type.
 *
 * \param dev  NewBus device object representing a candidate XenBus device.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_probe(device_t dev)
{
	if (!strcmp(xenbus_get_type(dev), "vif")) {
		DPRINTF("Claiming device %d, %s\n", device_get_unit(dev),
		    devclass_get_name(device_get_devclass(dev)));
		device_set_desc(dev, "Backend Virtual Network Device");
		device_quiet(dev);
		return (0);
	}
	return (ENXIO);
}

/**
 * Setup sysctl variables to control various Network Back parameters.
 *
 * \param xnb  Xen Net Back softc.
 */
static void
xnb_setup_sysctl(struct xnb_softc *xnb)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;

	sysctl_ctx = device_get_sysctl_ctx(xnb->dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xnb->dev);
	if (sysctl_tree == NULL)
		return;

#ifdef XNB_DEBUG
	SYSCTL_ADD_PROC(sysctl_ctx,
	    SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO,
	    "unit_test_results",
	    CTLTYPE_STRING | CTLFLAG_RD,
	    xnb,
	    0,
	    xnb_unit_test_main,
	    "A",
	    "Results of builtin unit tests");

	SYSCTL_ADD_PROC(sysctl_ctx,
	    SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO,
	    "dump_rings",
	    CTLTYPE_STRING | CTLFLAG_RD,
	    xnb,
	    0,
	    xnb_dump_rings,
	    "A",
	    "Xennet Back Rings");
#endif /* XNB_DEBUG */
}
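/*
 * The OIDs added above hang off the device's sysctl tree, so on a system
 * with a single backend instance the unit test results would typically be
 * readable at a node like dev.xnb.0.unit_test_results (the unit number
 * is, of course, instance dependent).
 */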
/**
 * Create a network device.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 */
int
create_netdev(device_t dev)
{
	struct ifnet *ifp;
	struct xnb_softc *xnb;
	int err = 0;
	uint32_t handle;

	xnb = device_get_softc(dev);
	mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF);
	mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF);
	mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF);

	xnb->dev = dev;

	ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts);
	ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL);

	/*
	 * Set the MAC address to a dummy value (00:00:00:00:00:00).  If the
	 * MAC address of the host-facing interface were set to the same as
	 * the guest-facing one (the value found in xenstore), the bridge
	 * would stop delivering packets to us because it would see that the
	 * destination address of the packet is the same as the interface,
	 * and so the bridge would expect the packet to have already been
	 * delivered locally (and just drop it).
	 */
	bzero(&xnb->mac[0], sizeof(xnb->mac));

	/*
	 * The interface will be named using the following nomenclature:
	 *
	 *	xnb<domid>.<handle>
	 *
	 * where <handle> is the order (index) of the interface within the
	 * guest.
	 */
	err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL,
	    "%" PRIu32, &handle);
	if (err != 0)
		return (err);
	snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32,
	    xenbus_get_otherend_id(dev), handle);

	if (err == 0) {
		/* Set up ifnet structure */
		ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
		ifp->if_softc = xnb;
		if_initname(ifp, xnb->if_name, IF_DUNIT_NONE);
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = xnb_ioctl;
		ifp->if_output = ether_output;
		ifp->if_start = xnb_start;
#ifdef notyet
		ifp->if_watchdog = xnb_watchdog;
#endif
		ifp->if_init = xnb_ifinit;
		ifp->if_mtu = ETHERMTU;
		ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;

		ifp->if_hwassist = XNB_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_capenable = IFCAP_HWCSUM;

		ether_ifattach(ifp, xnb->mac);
		xnb->carrier = 0;
	}

	return err;
}
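/*
 * As an example of the nomenclature above: the vif with handle 0
 * belonging to frontend domain 5 is attached as interface xnb5.0.
 */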
/**
 * Attach to a XenBus device that has been claimed by our probe routine.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_attach(device_t dev)
{
	struct xnb_softc *xnb;
	int error;
	xnb_ring_type_t i;

	error = create_netdev(dev);
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "creating netdev");
		return (error);
	}

	DPRINTF("Attaching to %s\n", xenbus_get_node(dev));

	/*
	 * Basic initialization.
	 * After this block it is safe to call xnb_detach()
	 * to clean up any allocated data for this instance.
	 */
	xnb = device_get_softc(dev);
	xnb->otherend_id = xenbus_get_otherend_id(dev);
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		xnb->ring_configs[i].ring_pages = 1;
	}

	/*
	 * Setup sysctl variables.
	 */
	xnb_setup_sysctl(xnb);

	/* Update hot-plug status to satisfy xend. */
	error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
	    "hotplug-status", "connected");
	if (error != 0) {
		xnb_attach_failed(xnb, error, "writing %s/hotplug-status",
		    xenbus_get_node(xnb->dev));
		return (error);
	}

	if ((error = xnb_publish_backend_info(xnb)) != 0) {
		/*
		 * If we can't publish our data, we cannot participate
		 * in this connection, and waiting for a front-end state
		 * change will not help the situation.
		 */
		xnb_attach_failed(xnb, error,
		    "Publishing backend status for %s",
		    xenbus_get_node(xnb->dev));
		return error;
	}

	/* Tell the front end that we are ready to connect. */
	xenbus_set_state(dev, XenbusStateInitWait);

	return (0);
}

/**
 * Detach from a net back device instance.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 *
 * \note A net back device may be detached at any time in its life-cycle,
 *       including part way through the attach process.  For this reason,
 *       initialization order and the initialization state checks in this
 *       routine must be carefully coupled so that attach time failures
 *       are gracefully handled.
 */
static int
xnb_detach(device_t dev)
{
	struct xnb_softc *xnb;

	DPRINTF("\n");

	xnb = device_get_softc(dev);
	mtx_lock(&xnb->sc_lock);
	while (xnb_shutdown(xnb) == EAGAIN) {
		msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0,
		    "xnb_shutdown", 0);
	}
	mtx_unlock(&xnb->sc_lock);
	DPRINTF("\n");

	mtx_destroy(&xnb->tx_lock);
	mtx_destroy(&xnb->rx_lock);
	mtx_destroy(&xnb->sc_lock);
	return (0);
}

/**
 * Prepare this net back device for suspension of this VM.
 *
 * \param dev  NewBus device object representing this Xen net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_suspend(device_t dev)
{
	return (0);
}

/**
 * Perform any processing required to recover from a suspended state.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_resume(device_t dev)
{
	return (0);
}

/**
 * Handle state changes expressed via the XenStore by our front-end peer.
 *
 * \param dev             NewBus device object representing this Xen
 *                        Net Back instance.
 * \param frontend_state  The new state of the front-end.
 */
static void
xnb_frontend_changed(device_t dev, XenbusState frontend_state)
{
	struct xnb_softc *xnb;

	xnb = device_get_softc(dev);

	DPRINTF("frontend_state=%s, xnb_state=%s\n",
	    xenbus_strstate(frontend_state),
	    xenbus_strstate(xenbus_get_state(xnb->dev)));

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;
	case XenbusStateInitialised:
	case XenbusStateConnected:
		xnb_connect(xnb);
		break;
	case XenbusStateClosing:
	case XenbusStateClosed:
		mtx_lock(&xnb->sc_lock);
		xnb_shutdown(xnb);
		mtx_unlock(&xnb->sc_lock);
		if (frontend_state == XenbusStateClosed)
			xenbus_set_state(xnb->dev, XenbusStateClosed);
		break;
	default:
		xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend",
		    frontend_state);
		break;
	}
}
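/*
 * Summary of the front-end state handling above:
 *
 *	Frontend state           Backend action
 *	-----------------        --------------------------------------
 *	Initialising             none; wait for the frontend to publish
 *	Initialised/Connected    xnb_connect()
 *	Closing/Closed           xnb_shutdown(); mirror Closed back
 *	anything else            xenbus_dev_fatal(EINVAL)
 */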
/*---------------------------- Request Processing ----------------------------*/
/**
 * Interrupt handler bound to the shared ring's event channel.
 * Entry point for the xennet transmit path in netback.
 * Transfers packets from the Xen ring to the host's generic networking stack.
 *
 * \param arg  Callback argument registered during event channel
 *             binding - the xnb_softc for this instance.
 */
static void
xnb_intr(void *arg)
{
	struct xnb_softc *xnb;
	struct ifnet *ifp;
	netif_tx_back_ring_t *txb;
	RING_IDX req_prod_local;

	xnb = (struct xnb_softc *)arg;
	ifp = xnb->xnb_ifp;
	txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;

	mtx_lock(&xnb->tx_lock);
	do {
		int notify;
		req_prod_local = txb->sring->req_prod;
		xen_rmb();

		for (;;) {
			struct mbuf *mbufc;
			int err;

			err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp,
			    xnb->tx_gnttab);
			if (err || (mbufc == NULL))
				break;

			/* Send the packet to the generic network stack */
			(*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
		if (notify != 0)
			xen_intr_signal(xnb->xen_intr_handle);

		txb->sring->req_event = txb->req_cons + 1;
		xen_mb();
	} while (txb->sring->req_prod != req_prod_local);
	mtx_unlock(&xnb->tx_lock);

	xnb_start(ifp);
}

/**
 * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
 * Will read exactly 0 or 1 packets from the ring; never a partial packet.
 *
 * \param[out] pkt      The returned packet.  If there is an error building
 *                      the packet, pkt.list_len will be set to 0.
 * \param[in]  tx_ring  Pointer to the Ring that is the input to this function
 * \param[in]  start    The ring index of the first potential request
 *
 * \return  The number of requests consumed to build this packet
 */
static int
xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
    RING_IDX start)
{
	/*
	 * Outline:
	 * 1) Initialize pkt
	 * 2) Read the first request of the packet
	 * 3) Read the extras
	 * 4) Set cdr
	 * 5) Loop on the remainder of the packet
	 * 6) Finalize pkt (stuff like car_size and list_len)
	 */
	int idx = start;
	int discard = 0; /* whether to discard the packet */
	int more_data = 0; /* there are more requests past the last one */
	uint16_t cdr_size = 0; /* accumulated size of requests 2 through n */

	xnb_pkt_initialize(pkt);

	/* Read the first request */
	if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
		pkt->size = tx->size;
		pkt->flags = tx->flags & ~NETTXF_more_data;
		more_data = tx->flags & NETTXF_more_data;
		pkt->list_len++;
		pkt->car = idx;
		idx++;
	}

	/* Read the extra info */
	if ((pkt->flags & NETTXF_extra_info) &&
	    RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_extra_info_t *ext =
		    (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx);
		pkt->extra.type = ext->type;
		switch (pkt->extra.type) {
		case XEN_NETIF_EXTRA_TYPE_GSO:
			pkt->extra.u.gso = ext->u.gso;
			break;
		default:
			/*
			 * The reference Linux netfront driver will
			 * never set any other extra.type.  So we don't
			 * know what to do with it.  Let's print an
			 * error, then consume and discard the packet
			 */
			printf("xnb(%s:%d): Unknown extra info type %d."
			    "  Discarding packet\n",
			    __func__, __LINE__, pkt->extra.type);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring,
			    start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring,
			    idx));
			discard = 1;
			break;
		}

		pkt->extra.flags = ext->flags;
		if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) {
			/*
			 * The reference Linux netfront driver never sets this
			 * flag (nor does any other known netfront).  So we
			 * will discard the packet.
			 */
			printf("xnb(%s:%d): Request sets "
			    "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle "
			    "that\n", __func__, __LINE__);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
			discard = 1;
		}

		idx++;
	}

	/* Set cdr.  If there is no more data, cdr is invalid */
	pkt->cdr = idx;

	/* Loop on remainder of packet */
	while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
		pkt->list_len++;
		cdr_size += tx->size;
		if (tx->flags & ~NETTXF_more_data) {
			/* There should be no other flags set at this point */
			printf("xnb(%s:%d): Request sets unknown flags %d "
			    "after the 1st request in the packet.\n",
			    __func__, __LINE__, tx->flags);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
		}

		more_data = tx->flags & NETTXF_more_data;
		idx++;
	}

	/* Finalize packet */
	if (more_data != 0) {
		/* The ring ran out of requests before finishing the packet */
		xnb_pkt_invalidate(pkt);
		idx = start; /* tell caller that we consumed no requests */
	} else {
		/* Calculate car_size */
		pkt->car_size = pkt->size - cdr_size;
	}
	if (discard != 0) {
		xnb_pkt_invalidate(pkt);
	}

	return idx - start;
}
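/*
 * Ring layout sketch for one TSO packet that arrives in two data-bearing
 * requests (indices relative to 'start'; this mirrors how the loop above
 * assigns car and cdr):
 *
 *	start + 0: netif_tx_request_t   first chunk   -> pkt->car
 *	start + 1: netif_extra_info_t   GSO metadata  -> pkt->extra
 *	start + 2: netif_tx_request_t   second chunk  -> pkt->cdr
 *
 * For such a packet xnb_ring2pkt() consumes three requests and returns 3.
 */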
/**
 * Respond to all the requests that constituted pkt.  Builds the responses and
 * writes them to the ring, but doesn't push them to the shared ring.
 *
 * \param[in]  pkt    the packet that needs a response
 * \param[out] ring   Responses go here
 * \param[in]  error  true if there was an error handling the packet, such
 *                    as in the hypervisor copy op or mbuf allocation
 */
static void
xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
    int error)
{
	/*
	 * Outline:
	 * 1) Respond to the first request
	 * 2) Respond to the extra info request
	 * 3) Loop through every remaining request in the packet, generating
	 *    responses that copy those requests' ids and set the status
	 *    appropriately.
	 */
	netif_tx_request_t *tx;
	netif_tx_response_t *rsp;
	int i;
	uint16_t status;

	status = (xnb_pkt_is_valid(pkt) == 0) || error ?
	    NETIF_RSP_ERROR : NETIF_RSP_OKAY;
	KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car),
	    ("Cannot respond to ring requests out of order"));

	if (pkt->list_len >= 1) {
		uint16_t id;
		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;

		if (pkt->flags & NETRXF_extra_info) {
			rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
			rsp->status = NETIF_RSP_NULL;
			ring->rsp_prod_pvt++;
		}
	}

	for (i = 0; i < pkt->list_len - 1; i++) {
		uint16_t id;
		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;
	}
}

/**
 * Create an mbuf chain to represent a packet.  Initializes all of the headers
 * in the mbuf chain, but does not copy the data.  The returned chain must be
 * free()'d when no longer needed.
 *
 * \param[in] pkt  A packet to model the mbuf chain after
 *
 * \return  A newly allocated mbuf chain, possibly with clusters attached.
 *          NULL on failure
 */
static struct mbuf*
xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
{
	/**
	 * \todo consider using a memory pool for mbufs instead of
	 * reallocating them for every packet
	 */
	/** \todo handle extra data */
	struct mbuf *m;

	m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);

	if (m != NULL) {
		m->m_pkthdr.rcvif = ifp;
		if (pkt->flags & NETTXF_data_validated) {
			/*
			 * We lie to the host OS and always tell it that the
			 * checksums are ok, because the packet is unlikely to
			 * get corrupted going across domains.
			 */
			m->m_pkthdr.csum_flags = (
			    CSUM_IP_CHECKED |
			    CSUM_IP_VALID |
			    CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR
			);
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	return m;
}
/**
 * Build a gnttab_copy table that can be used to copy data from a pkt
 * to an mbufc.  Does not actually perform the copy.  Always uses gref's on
 * the packet side.
 *
 * \param[in]  pkt          pkt's associated requests form the src for
 *                          the copy operation
 * \param[in]  mbufc        mbufc's storage forms the dest for the copy
 *                          operation
 * \param[out] gnttab       Storage for the returned grant table
 * \param[in]  txb          Pointer to the backend ring structure
 * \param[in]  otherend_id  The domain ID of the other end of the copy
 *
 * \return  The number of gnttab entries filled
 */
static int
xnb_txpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
    gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
    domid_t otherend_id)
{

	const struct mbuf *mbuf = mbufc; /* current mbuf within the chain */
	int gnt_idx = 0;	/* index into grant table */
	RING_IDX r_idx = pkt->car; /* index into tx ring buffer */
	int r_ofs = 0;	/* offset of next data within tx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining = pkt->size;

	while (size_remaining > 0) {
		const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
		const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
		const size_t req_size =
		    r_idx == pkt->car ? pkt->car_size : txq->size;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].source.u.ref = txq->gref;
		gnttab[gnt_idx].source.domid = otherend_id;
		gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
		gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_source_gref;

		gnt_idx++;
		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next tx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return gnt_idx;
}
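/*
 * The filled table is normally handed straight to the copy hypercall, as
 * xnb_recv() below does (sketch; assertions omitted):
 *
 *	nr_ents = xnb_txpkt2gnttab(&pkt, mbufc, gnttab, txb, otherend);
 *	if (nr_ents > 0)
 *		HYPERVISOR_grant_table_op(GNTTABOP_copy, gnttab, nr_ents);
 */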
/**
 * Check the status of the grant copy operations, and update the mbuf chain's
 * various non-data fields to reflect the data present.
 *
 * \param[in,out] mbufc      mbuf chain to update.  The chain must be valid
 *                           and of the correct length, and data should
 *                           already be present
 * \param[in]     gnttab     A grant table for a just completed copy op
 * \param[in]     n_entries  The number of valid entries in the grant table
 */
static void
xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
    int n_entries)
{
	struct mbuf *mbuf = mbufc;
	int i;
	size_t total_size = 0;

	for (i = 0; i < n_entries; i++) {
		KASSERT(gnttab[i].status == GNTST_okay,
		    ("Some gnttab_copy entry had error status %hd\n",
		    gnttab[i].status));

		mbuf->m_len += gnttab[i].len;
		total_size += gnttab[i].len;
		if (M_TRAILINGSPACE(mbuf) <= 0) {
			mbuf = mbuf->m_next;
		}
	}
	mbufc->m_pkthdr.len = total_size;

#if defined(INET) || defined(INET6)
	xnb_add_mbuf_cksum(mbufc);
#endif
}

/**
 * Dequeue at most one packet from the shared ring.
 *
 * \param[in,out] txb       Netif tx ring.  A packet will be removed from it,
 *                          and its private indices will be updated.  But the
 *                          indices will not be pushed to the shared ring.
 * \param[in]     otherend  Domain ID of the other end of the ring
 * \param[out]    mbufc     The assembled mbuf chain, ready to send to the
 *                          generic networking stack
 * \param[in]     ifnet     Interface to which the packet will be sent
 * \param[in,out] gnttab    Pointer to enough memory for a grant table.  We
 *                          make this a function parameter so that we will
 *                          take less stack space.
 *
 * \return  An error code
 */
static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
    struct ifnet *ifnet, gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	/* number of tx requests consumed to build the last packet */
	int num_consumed;
	int nr_ents;

	*mbufc = NULL;
	num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
	if (num_consumed == 0)
		return 0;	/* Nothing to receive */

	/* update statistics independent of errors */
	if_inc_counter(ifnet, IFCOUNTER_IPACKETS, 1);

	/*
	 * if we got here, then 1 or more requests were consumed, but the
	 * packet is not necessarily valid.
	 */
	if (xnb_pkt_is_valid(&pkt) == 0) {
		/* got a garbage packet, respond and drop it */
		xnb_txpkt2rsp(&pkt, txb, 1);
		txb->req_cons += num_consumed;
		DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
		    num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IERRORS, 1);
		return EINVAL;
	}

	*mbufc = xnb_pkt2mbufc(&pkt, ifnet);

	if (*mbufc == NULL) {
		/*
		 * Couldn't allocate mbufs.  Respond and drop the packet.  Do
		 * not consume the requests
		 */
		xnb_txpkt2rsp(&pkt, txb, 1);
		DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n",
		    num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IQDROPS, 1);
		return ENOMEM;
	}

	nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend);

	if (nr_ents > 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, nr_ents);
		KASSERT(hv_ret == 0,
		    ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
		xnb_update_mbufc(*mbufc, gnttab, nr_ents);
	}

	xnb_txpkt2rsp(&pkt, txb, 0);
	txb->req_cons += num_consumed;
	return 0;
}
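/*
 * A sketch of the consumer loop for this routine (this is the pattern
 * used by xnb_intr() above; locking and ring pushes omitted):
 *
 *	err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp, xnb->tx_gnttab);
 *	if (err == 0 && mbufc != NULL)
 *		(*ifp->if_input)(ifp, mbufc);
 */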
/**
 * Create an xnb_pkt based on the contents of an mbuf chain.
 *
 * \param[in]  mbufc  mbuf chain to transform into a packet
 * \param[out] pkt    Storage for the newly generated xnb_pkt
 * \param[in]  start  The ring index of the first available slot in the rx
 *                    ring
 * \param[in]  space  The number of free slots in the rx ring
 *
 * \retval 0       Success
 * \retval EINVAL  mbufc was corrupt or not convertible into a pkt
 * \retval EAGAIN  There was not enough space in the ring to queue the
 *                 packet
 */
static int
xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt,
    RING_IDX start, int space)
{

	int retval = 0;

	if ((mbufc == NULL) ||
	    ((mbufc->m_flags & M_PKTHDR) == 0) ||
	    (mbufc->m_pkthdr.len == 0)) {
		xnb_pkt_invalidate(pkt);
		retval = EINVAL;
	} else {
		int slots_required;

		xnb_pkt_validate(pkt);
		pkt->flags = 0;
		pkt->size = mbufc->m_pkthdr.len;
		pkt->car = start;
		pkt->car_size = mbufc->m_len;

		if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) {
			pkt->flags |= NETRXF_extra_info;
			pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz;
			pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			pkt->extra.u.gso.pad = 0;
			pkt->extra.u.gso.features = 0;
			pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO;
			pkt->extra.flags = 0;
			pkt->cdr = start + 2;
		} else {
			pkt->cdr = start + 1;
		}
		if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) {
			pkt->flags |=
			    (NETRXF_csum_blank | NETRXF_data_validated);
		}

		/*
		 * Each ring response can have up to PAGE_SIZE of data.
		 * Assume that we can defragment the mbuf chain efficiently
		 * into responses so that each response but the last uses all
		 * PAGE_SIZE bytes.
		 */
		pkt->list_len = (pkt->size + PAGE_SIZE - 1) / PAGE_SIZE;

		if (pkt->list_len > 1) {
			pkt->flags |= NETRXF_more_data;
		}

		slots_required = pkt->list_len +
		    (pkt->flags & NETRXF_extra_info ? 1 : 0);
		if (slots_required > space) {
			xnb_pkt_invalidate(pkt);
			retval = EAGAIN;
		}
	}

	return retval;
}
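/*
 * Worked example of the slot arithmetic above, assuming a 4096-byte
 * PAGE_SIZE: a 9000-byte TSO mbuf chain yields
 * list_len = (9000 + 4095) / 4096 = 3 data responses, plus one slot for
 * the GSO extra info, so slots_required is 4 and EAGAIN is returned
 * unless the rx ring has at least 4 free slots.
 */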
/**
 * Build a gnttab_copy table that can be used to copy data from an mbuf chain
 * to the frontend's shared buffers.  Does not actually perform the copy.
 * Always uses gref's on the other end's side.
 *
 * \param[in]  pkt          pkt's associated responses form the dest for the
 *                          copy operation
 * \param[in]  mbufc        The source for the copy operation
 * \param[out] gnttab       Storage for the returned grant table
 * \param[in]  rxb          Pointer to the backend ring structure
 * \param[in]  otherend_id  The domain ID of the other end of the copy
 *
 * \return  The number of gnttab entries filled
 */
static int
xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
    gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb,
    domid_t otherend_id)
{

	const struct mbuf *mbuf = mbufc; /* current mbuf within the chain */
	int gnt_idx = 0;	/* index into grant table */
	RING_IDX r_idx = pkt->car; /* index into rx ring buffer */
	int r_ofs = 0;	/* offset of next data within rx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining;

	size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0;

	while (size_remaining > 0) {
		const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx);
		const size_t mbuf_space = mbuf->m_len - m_ofs;
		/* Xen shared pages have an implied size of PAGE_SIZE */
		const size_t req_size = PAGE_SIZE;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].dest.u.ref = rxq->gref;
		gnttab[gnt_idx].dest.domid = otherend_id;
		gnttab[gnt_idx].dest.offset = r_ofs;
		gnttab[gnt_idx].source.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_dest_gref;

		gnt_idx++;

		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next rx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (mbuf->m_len - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return gnt_idx;
}
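/*
 * As with the tx direction, the table built here is meant to be passed
 * directly to the copy hypercall; xnb_send() below follows this pattern
 * (sketch):
 *
 *	n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
 *	if (n_entries != 0)
 *		HYPERVISOR_grant_table_op(GNTTABOP_copy, gnttab, n_entries);
 */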
/**
 * Generates responses for all the requests that constituted pkt.  Builds
 * responses and writes them to the ring, but doesn't push the shared ring
 * indices.
 *
 * \param[in]  pkt        the packet that needs a response
 * \param[in]  gnttab     The grant copy table corresponding to this packet.
 *                        Used to determine how many netif_rx_response_t's
 *                        to generate.
 * \param[in]  n_entries  Number of relevant entries in the grant table
 * \param[out] ring       Responses go here
 *
 * \return  The number of RX requests that were consumed to generate
 *          the responses
 */
static int
xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
    int n_entries, netif_rx_back_ring_t *ring)
{
	/*
	 * This code makes the following assumptions:
	 *	* All entries in gnttab set GNTCOPY_dest_gref
	 *	* The entries in gnttab are grouped by their grefs: any two
	 *	  entries with the same gref must be adjacent
	 */
	int error = 0;
	int gnt_idx, i;
	int n_responses = 0;
	grant_ref_t last_gref = GRANT_REF_INVALID;
	RING_IDX r_idx;

	KASSERT(gnttab != NULL, ("Received a null grant table copy"));

	/*
	 * In the event of an error, we only need to send one response to the
	 * netfront.  In that case, we mustn't write any data to the responses
	 * after the one we send.  So we must loop all the way through gnttab
	 * looking for errors before we generate any responses.
	 *
	 * Since we're looping through the grant table anyway, we'll count the
	 * number of different gref's in it, which will tell us how many
	 * responses to generate.
	 */
	for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
		int16_t status = gnttab[gnt_idx].status;
		if (status != GNTST_okay) {
			DPRINTF(
			    "Got error %d for hypervisor gnttab_copy status\n",
			    status);
			error = 1;
			break;
		}
		if (gnttab[gnt_idx].dest.u.ref != last_gref) {
			n_responses++;
			last_gref = gnttab[gnt_idx].dest.u.ref;
		}
	}

	if (error != 0) {
		uint16_t id;
		netif_rx_response_t *rsp;

		id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = NETIF_RSP_ERROR;
		n_responses = 1;
	} else {
		gnt_idx = 0;
		const int has_extra = pkt->flags & NETRXF_extra_info;
		if (has_extra != 0)
			n_responses++;

		for (i = 0; i < n_responses; i++) {
			netif_rx_request_t rxq;
			netif_rx_response_t *rsp;

			r_idx = ring->rsp_prod_pvt + i;
			/*
			 * We copy the structure of rxq instead of making a
			 * pointer because it shares the same memory as rsp.
			 */
			rxq = *(RING_GET_REQUEST(ring, r_idx));
			rsp = RING_GET_RESPONSE(ring, r_idx);
			if (has_extra && (i == 1)) {
				netif_extra_info_t *ext =
				    (netif_extra_info_t*)rsp;
				ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
				ext->flags = 0;
				ext->u.gso.size = pkt->extra.u.gso.size;
				ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
				ext->u.gso.pad = 0;
				ext->u.gso.features = 0;
			} else {
				rsp->id = rxq.id;
				rsp->status = GNTST_okay;
				rsp->offset = 0;
				rsp->flags = 0;
				if (i < pkt->list_len - 1)
					rsp->flags |= NETRXF_more_data;
				if ((i == 0) && has_extra)
					rsp->flags |= NETRXF_extra_info;
				if ((i == 0) &&
				    (pkt->flags & NETRXF_data_validated)) {
					rsp->flags |= NETRXF_data_validated;
					rsp->flags |= NETRXF_csum_blank;
				}
				rsp->status = 0;
				for (; gnttab[gnt_idx].dest.u.ref == rxq.gref;
				    gnt_idx++) {
					rsp->status += gnttab[gnt_idx].len;
				}
			}
		}
	}

	ring->req_cons += n_responses;
	ring->rsp_prod_pvt += n_responses;
	return n_responses;
}
#if defined(INET) || defined(INET6)
/**
 * Add IP, TCP, and/or UDP checksums to every mbuf in a chain.  The first mbuf
 * in the chain must start with a struct ether_header.
 *
 * XXX This function will perform incorrectly on UDP packets that are split up
 * into multiple ethernet frames.
 */
static void
xnb_add_mbuf_cksum(struct mbuf *mbufc)
{
	struct ether_header *eh;
	struct ip *iph;
	uint16_t ether_type;

	eh = mtod(mbufc, struct ether_header*);
	ether_type = ntohs(eh->ether_type);
	if (ether_type != ETHERTYPE_IP) {
		/* Nothing to calculate */
		return;
	}

	iph = (struct ip*)(eh + 1);
	if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
		iph->ip_sum = 0;
		iph->ip_sum = in_cksum_hdr(iph);
	}

	switch (iph->ip_p) {
	case IPPROTO_TCP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
			size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct tcphdr *th = (struct tcphdr*)(iph + 1);
			th->th_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen));
			th->th_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	case IPPROTO_UDP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
			size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct udphdr *uh = (struct udphdr*)(iph + 1);
			uh->uh_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen));
			uh->uh_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	default:
		break;
	}
}
#endif /* INET || INET6 */
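/*
 * Note on the checksum math above: in_pseudo() seeds the checksum field
 * with the ones-complement sum of the IPv4 pseudo-header (source and
 * destination addresses plus protocol and transport length), and
 * in_cksum_skip() then folds in the transport header and payload while
 * skipping the Ethernet and IP headers, producing the value a receiver
 * expects to verify.
 */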
static void
xnb_stop(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);
	ifp = xnb->xnb_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

static int
xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xnb_softc *xnb = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq*) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr*)data;
#endif
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&xnb->sc_lock);
		if (ifp->if_flags & IFF_UP) {
			xnb_ifinit_locked(xnb);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xnb_stop(xnb);
			}
		}
		/*
		 * Note: netfront sets a variable named xn_if_flags
		 * here, but that variable is never read
		 */
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFADDR:
#ifdef INET
		mtx_lock(&xnb->sc_lock);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
				    IFF_DRV_OACTIVE);
				if_link_state_change(ifp,
				    LINK_STATE_DOWN);
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				if_link_state_change(ifp,
				    LINK_STATE_UP);
			}
			arp_ifinit(ifp, ifa);
			mtx_unlock(&xnb->sc_lock);
		} else {
			mtx_unlock(&xnb->sc_lock);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFCAP:
		mtx_lock(&xnb->sc_lock);
		if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			ifp->if_hwassist |= XNB_CSUM_FEATURES;
		} else {
			ifp->if_capenable &= ~(IFCAP_TXCSUM);
			ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
		}
		if (ifr->ifr_reqcap & IFCAP_RXCSUM) {
			ifp->if_capenable |= IFCAP_RXCSUM;
		} else {
			ifp->if_capenable &= ~(IFCAP_RXCSUM);
		}
		/*
		 * TODO enable TSO4 and LRO once we no longer need
		 * to calculate checksums in software
		 */
#if 0
		if (ifr->ifr_reqcap & IFCAP_TSO4) {
			if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) {
				printf("xnb: Xen netif requires that "
				    "TXCSUM be enabled in order "
				    "to use TSO4\n");
				error = EINVAL;
			} else {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			}
		} else {
			ifp->if_capenable &= ~(IFCAP_TSO4);
			ifp->if_hwassist &= ~(CSUM_TSO);
		}
		if (ifr->ifr_reqcap & IFCAP_LRO) {
			ifp->if_capenable |= IFCAP_LRO;
		} else {
			ifp->if_capenable &= ~(IFCAP_LRO);
		}
#endif
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		xnb_ifinit(xnb);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
xnb_start_locked(struct ifnet *ifp)
{
	netif_rx_back_ring_t *rxb;
	struct xnb_softc *xnb;
	struct mbuf *mbufc;
	RING_IDX req_prod_local;

	xnb = ifp->if_softc;
	rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;

	if (!xnb->carrier)
		return;

	do {
		int out_of_space = 0;
		int notify;
		req_prod_local = rxb->sring->req_prod;
		xen_rmb();
		for (;;) {
			int error;

			IF_DEQUEUE(&ifp->if_snd, mbufc);
			if (mbufc == NULL)
				break;
			error = xnb_send(rxb, xnb->otherend_id, mbufc,
			    xnb->rx_gnttab);
			switch (error) {
			case EAGAIN:
				/*
				 * Insufficient space in the ring.
				 * Requeue pkt and send when space is
				 * available.
				 */
				IF_PREPEND(&ifp->if_snd, mbufc);
				/*
				 * Perhaps the frontend missed an IRQ
				 * and went to sleep.  Notify it to wake
				 * it up.
				 */
				out_of_space = 1;
				break;

			case EINVAL:
				/* OS gave a corrupt packet.  Drop it. */
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				/* FALLTHROUGH */
			default:
				/*
				 * Send succeeded, or packet had error.
				 * Free the packet.
				 */
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
				if (mbufc)
					m_freem(mbufc);
				break;
			}
			if (out_of_space != 0)
				break;
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
		if ((notify != 0) || (out_of_space != 0))
			xen_intr_signal(xnb->xen_intr_handle);
		rxb->sring->req_event = req_prod_local + 1;
		xen_mb();
	} while (rxb->sring->req_prod != req_prod_local);
}

/**
 * Sends one packet to the ring.  Blocks until the packet is on the ring.
 *
 * \param[in,out] ring      The packet will be pushed onto this ring, but the
 *                          otherend will not be notified.
 * \param[in]     otherend  The domain ID of the other end of the connection
 * \param[in]     mbufc     Contains one packet to send.  Caller must free
 * \param[in,out] gnttab    Pointer to enough memory for a grant table.  We
 *                          make this a function parameter so that we will
 *                          take less stack space.
 *
 * \retval EAGAIN  The ring did not have enough space for the packet.
 *                 The ring has not been modified
 * \retval EINVAL  mbufc was corrupt or not convertible into a pkt
 */
static int
xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
    gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	int error, n_entries, n_reqs;
	RING_IDX space;

	space = ring->sring->req_prod - ring->req_cons;
	error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
	if (error != 0)
		return error;
	n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
	if (n_entries != 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, n_entries);
		KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
		    hv_ret));
	}

	n_reqs = xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);

	return 0;
}

static void
xnb_start(struct ifnet *ifp)
{
	struct xnb_softc *xnb;

	xnb = ifp->if_softc;
	mtx_lock(&xnb->rx_lock);
	xnb_start_locked(ifp);
	mtx_unlock(&xnb->rx_lock);
}

/* equivalent of network_open() in Linux */
static void
xnb_ifinit_locked(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	ifp = xnb->xnb_ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xnb_stop(xnb);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
xnb_ifinit(void *xsc)
{
	struct xnb_softc *xnb = xsc;

	mtx_lock(&xnb->sc_lock);
	xnb_ifinit_locked(xnb);
	mtx_unlock(&xnb->sc_lock);
}

/**
 * Callback used by the generic networking code to tell us when our carrier
 * state has changed.  Since we don't have a physical carrier, we don't care.
 */
static int
xnb_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

/**
 * Callback used by the generic networking code to ask us what our carrier
 * state is.  Since we don't have a physical carrier, this is very simple.
 */
static void
xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xnb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xnb_probe),
	DEVMETHOD(device_attach,	xnb_attach),
	DEVMETHOD(device_detach,	xnb_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	xnb_suspend),
	DEVMETHOD(device_resume,	xnb_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),

	{ 0, 0 }
};

static driver_t xnb_driver = {
	"xnb",
	xnb_methods,
	sizeof(struct xnb_softc),
};
devclass_t xnb_devclass;

DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);

/*-------------------------- Unit Tests -------------------------------------*/
#ifdef XNB_DEBUG
#include "netback_unit_tests.c"
#endif

/**
 * Generates responses for all the requests that constituted pkt.  Builds
 * responses and writes them to the ring, but doesn't push the shared ring
 * indices.
 * \param[in] pkt	the packet that needs a response
 * \param[in] gnttab	The grant copy table corresponding to this packet.
 *			Used to determine how many netif_rx_response_t's to
 *			generate.
 * \param[in] n_entries	Number of relevant entries in the grant table
 * \param[out] ring	Responses go here
 * \return		The number of RX requests that were consumed to
 *			generate the responses
 */
static int
xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
	      int n_entries, netif_rx_back_ring_t *ring)
{
	/*
	 * This code makes the following assumptions:
	 * * All entries in gnttab set GNTCOPY_dest_gref
	 * * The entries in gnttab are grouped by their grefs: any two
	 *   entries with the same gref must be adjacent
	 */
	int error = 0;
	int gnt_idx, i;
	int n_responses = 0;
	grant_ref_t last_gref = GRANT_REF_INVALID;
	RING_IDX r_idx;

	KASSERT(gnttab != NULL, ("Received a null grant table copy"));

	/*
	 * In the event of an error, we only need to send one response to the
	 * netfront.  In that case, we mustn't write any data to the responses
	 * after the one we send.  So we must loop all the way through gnttab
	 * looking for errors before we generate any responses.
	 *
	 * Since we're looping through the grant table anyway, we'll count the
	 * number of different gref's in it, which will tell us how many
	 * responses to generate.
	 */
	for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
		int16_t status = gnttab[gnt_idx].status;
		if (status != GNTST_okay) {
			DPRINTF(
			    "Got error %d for hypervisor gnttab_copy status\n",
			    status);
			error = 1;
			break;
		}
		if (gnttab[gnt_idx].dest.u.ref != last_gref) {
			n_responses++;
			last_gref = gnttab[gnt_idx].dest.u.ref;
		}
	}

	if (error != 0) {
		uint16_t id;
		netif_rx_response_t *rsp;

		id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = NETIF_RSP_ERROR;
		n_responses = 1;
	} else {
		const int has_extra = pkt->flags & NETRXF_extra_info;

		gnt_idx = 0;
		if (has_extra != 0)
			n_responses++;

		for (i = 0; i < n_responses; i++) {
			netif_rx_request_t rxq;
			netif_rx_response_t *rsp;

			r_idx = ring->rsp_prod_pvt + i;
			/*
			 * We copy the structure of rxq instead of making a
			 * pointer because it shares the same memory as rsp.
			 */
			rxq = *(RING_GET_REQUEST(ring, r_idx));
			rsp = RING_GET_RESPONSE(ring, r_idx);
			if (has_extra && (i == 1)) {
				netif_extra_info_t *ext =
				    (netif_extra_info_t*)rsp;
				ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
				ext->flags = 0;
				ext->u.gso.size = pkt->extra.u.gso.size;
				ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
				ext->u.gso.pad = 0;
				ext->u.gso.features = 0;
			} else {
				rsp->id = rxq.id;
				rsp->offset = 0;
				rsp->flags = 0;
				if (i < pkt->list_len - 1)
					rsp->flags |= NETRXF_more_data;
				if ((i == 0) && has_extra)
					rsp->flags |= NETRXF_extra_info;
				if ((i == 0) &&
				    (pkt->flags & NETRXF_data_validated)) {
					rsp->flags |= NETRXF_data_validated;
					rsp->flags |= NETRXF_csum_blank;
				}
				rsp->status = 0;
				for (; gnttab[gnt_idx].dest.u.ref == rxq.gref;
				    gnt_idx++) {
					rsp->status += gnttab[gnt_idx].len;
				}
			}
		}
	}

	ring->req_cons += n_responses;
	ring->rsp_prod_pvt += n_responses;
	return n_responses;
}
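
/*
 * Note on the status field above: in the netif RX protocol, a response's
 * status doubles as the byte count delivered into that gref on success
 * (hence the accumulation of gnttab[].len per gref), while a negative value
 * such as NETIF_RSP_ERROR signals failure to the frontend.
 */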

#if defined(INET) || defined(INET6)
/**
 * Add IP, TCP, and/or UDP checksums to every mbuf in a chain.  The first mbuf
 * in the chain must start with a struct ether_header.
 *
 * XXX This function will perform incorrectly on UDP packets that are split up
 * into multiple ethernet frames.
 */
static void
xnb_add_mbuf_cksum(struct mbuf *mbufc)
{
	struct ether_header *eh;
	struct ip *iph;
	uint16_t ether_type;

	eh = mtod(mbufc, struct ether_header*);
	ether_type = ntohs(eh->ether_type);
	if (ether_type != ETHERTYPE_IP) {
		/* Nothing to calculate */
		return;
	}

	iph = (struct ip*)(eh + 1);
	if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) {
		iph->ip_sum = 0;
		iph->ip_sum = in_cksum_hdr(iph);
	}

	switch (iph->ip_p) {
	case IPPROTO_TCP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct tcphdr *th = (struct tcphdr*)(iph + 1);
			th->th_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen));
			th->th_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	case IPPROTO_UDP:
		if (mbufc->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip);
			struct udphdr *uh = (struct udphdr*)(iph + 1);
			uh->uh_sum = in_pseudo(iph->ip_src.s_addr,
			    iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen));
			uh->uh_sum = in_cksum_skip(mbufc,
			    sizeof(struct ether_header) + ntohs(iph->ip_len),
			    sizeof(struct ether_header) + (iph->ip_hl << 2));
		}
		break;
	default:
		break;
	}
}
#endif /* INET || INET6 */
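
/*
 * Checksum mechanics used above, for reference: the pseudo-header sum from
 * in_pseudo() is stored in the checksum field itself, so the subsequent
 * in_cksum_skip() pass, which sums every byte from the end of the IP header
 * (the skip argument) through ether header + ip_len, folds the pseudo-header
 * in and leaves the final one's-complement checksum in place.
 */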

static void
xnb_stop(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);
	ifp = xnb->xnb_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

static int
xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xnb_softc *xnb = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq*) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr*)data;
#endif
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&xnb->sc_lock);
		if (ifp->if_flags & IFF_UP) {
			xnb_ifinit_locked(xnb);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xnb_stop(xnb);
			}
		}
		/*
		 * Note: netfront sets a variable named xn_if_flags
		 * here, but that variable is never read
		 */
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFADDR:
#ifdef INET
		mtx_lock(&xnb->sc_lock);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
				    IFF_DRV_OACTIVE);
				if_link_state_change(ifp, LINK_STATE_DOWN);
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				if_link_state_change(ifp, LINK_STATE_UP);
			}
			arp_ifinit(ifp, ifa);
			mtx_unlock(&xnb->sc_lock);
		} else {
			mtx_unlock(&xnb->sc_lock);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFCAP:
		mtx_lock(&xnb->sc_lock);
		if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			ifp->if_hwassist |= XNB_CSUM_FEATURES;
		} else {
			ifp->if_capenable &= ~(IFCAP_TXCSUM);
			ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
		}
		if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
			ifp->if_capenable |= IFCAP_RXCSUM;
		} else {
			ifp->if_capenable &= ~(IFCAP_RXCSUM);
		}
		/*
		 * TODO enable TSO4 and LRO once we no longer need
		 * to calculate checksums in software
		 */
#if 0
		if (ifr->ifr_reqcap & IFCAP_TSO4) {
			if (!(IFCAP_TXCSUM & ifp->if_capenable)) {
				printf("xnb: Xen netif requires that "
					"TXCSUM be enabled in order "
					"to use TSO4\n");
				error = EINVAL;
			} else {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			}
		} else {
			ifp->if_capenable &= ~(IFCAP_TSO4);
			ifp->if_hwassist &= ~(CSUM_TSO);
		}
		if (ifr->ifr_reqcap & IFCAP_LRO) {
			ifp->if_capenable |= IFCAP_LRO;
		} else {
			ifp->if_capenable &= ~(IFCAP_LRO);
		}
#endif
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		xnb_ifinit(xnb);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
xnb_start_locked(struct ifnet *ifp)
{
	netif_rx_back_ring_t *rxb;
	struct xnb_softc *xnb;
	struct mbuf *mbufc;
	RING_IDX req_prod_local;

	xnb = ifp->if_softc;
	rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;

	if (!xnb->carrier)
		return;

	do {
		int out_of_space = 0;
		int notify;
		req_prod_local = rxb->sring->req_prod;
		xen_rmb();
		for (;;) {
			int error;

			IF_DEQUEUE(&ifp->if_snd, mbufc);
			if (mbufc == NULL)
				break;
			error = xnb_send(rxb, xnb->otherend_id, mbufc,
			    xnb->rx_gnttab);
			switch (error) {
			case EAGAIN:
				/*
				 * Insufficient space in the ring.
				 * Requeue pkt and send when space is
				 * available.
				 */
				IF_PREPEND(&ifp->if_snd, mbufc);
				/*
				 * Perhaps the frontend missed an IRQ
				 * and went to sleep.  Notify it to wake
				 * it up.
				 */
				out_of_space = 1;
				break;

			case EINVAL:
				/* OS gave a corrupt packet.  Drop it. */
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				/* FALLTHROUGH */
			default:
				/*
				 * Send succeeded, or packet had error.
				 * Free the packet.
				 */
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
				if (mbufc)
					m_freem(mbufc);
				break;
			}
			if (out_of_space != 0)
				break;
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
		if ((notify != 0) || (out_of_space != 0))
			xen_intr_signal(xnb->xen_intr_handle);
		rxb->sring->req_event = req_prod_local + 1;
		xen_mb();
	} while (rxb->sring->req_prod != req_prod_local);
}
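
/*
 * The do/while in xnb_start_locked() is the usual Xen shared-ring idiom for
 * avoiding lost wakeups: after draining the requests observed in
 * req_prod_local, it advertises interest in the next request by setting
 * req_event = req_prod_local + 1, issues a barrier, and re-reads req_prod.
 * If the frontend produced more requests in the meantime, the loop runs
 * again rather than waiting for an event that was already raised.
 */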

/**
 * Sends one packet to the ring.  Blocks until the packet is on the ring.
 * \param[in,out] ring	The packet will be pushed onto this ring, but the
 *			otherend will not be notified.
 * \param[in] otherend	The domain ID of the other end of the connection
 * \param[in] mbufc	Contains one packet to send.  Caller must free.
 * \param[in,out] gnttab	Pointer to enough memory for a grant table.
 *			We make this a function parameter so that we will
 *			take less stack space.
 * \retval 0		Success
 * \retval EAGAIN	The ring did not have enough space for the packet.
 *			The ring has not been modified.
 * \retval EINVAL	mbufc was corrupt or not convertible into a pkt
 */
static int
xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
	 gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	int error, n_entries;
	RING_IDX space;

	space = ring->sring->req_prod - ring->req_cons;
	error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
	if (error != 0)
		return error;
	n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
	if (n_entries != 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, n_entries);
		KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
		    hv_ret));
	}

	xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);

	return 0;
}

static void
xnb_start(struct ifnet *ifp)
{
	struct xnb_softc *xnb;

	xnb = ifp->if_softc;
	mtx_lock(&xnb->rx_lock);
	xnb_start_locked(ifp);
	mtx_unlock(&xnb->rx_lock);
}

/* equivalent of network_open() in Linux */
static void
xnb_ifinit_locked(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	ifp = xnb->xnb_ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xnb_stop(xnb);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
xnb_ifinit(void *xsc)
{
	struct xnb_softc *xnb = xsc;

	mtx_lock(&xnb->sc_lock);
	xnb_ifinit_locked(xnb);
	mtx_unlock(&xnb->sc_lock);
}

/**
 * Callback used by the generic networking code to tell us when our carrier
 * state has changed.  Since we don't have a physical carrier, we don't care.
 */
static int
xnb_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

/**
 * Callback used by the generic networking code to ask us what our carrier
 * state is.  Since we don't have a physical carrier, this is very simple.
 */
static void
xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xnb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xnb_probe),
	DEVMETHOD(device_attach,	xnb_attach),
	DEVMETHOD(device_detach,	xnb_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	xnb_suspend),
	DEVMETHOD(device_resume,	xnb_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),

	{ 0, 0 }
};

static driver_t xnb_driver = {
	"xnb",
	xnb_methods,
	sizeof(struct xnb_softc),
};
devclass_t xnb_devclass;

DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);

/*-------------------------- Unit Tests -------------------------------------*/
#ifdef XNB_DEBUG
#include "netback_unit_tests.c"
#endif