// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ethernet.h"

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <ddk/debug.h>
#include <ddk/io-buffer.h>
#include <ddk/protocol/ethernet.h>
#include <fbl/algorithm.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>
#include <fbl/unique_ptr.h>
#include <pretty/hexdump.h>
#include <virtio/net.h>
#include <virtio/virtio.h>
#include <zircon/assert.h>
#include <zircon/status.h>
#include <zircon/types.h>

#include "ring.h"
#include "trace.h"

// Enables/disables debugging info
#define LOCAL_TRACE 0

namespace virtio {

namespace {

// Specifies how many packets can fit in each of the receive and transmit
// backlogs.  This is also the number of descriptors in each virtqueue.
const size_t kBacklog = 32;

// Specifies the maximum transfer unit we support and the maximum layer 1
// Ethernet packet header length.
const size_t kVirtioMtu = 1500;
const size_t kL1EthHdrLen = 26;

// Other constants determined by the values above and the memory architecture.
// The goal here is to allocate single-page I/O buffers.
// kFrameSize is the per-packet slot: virtio header + L1 header + MTU payload.
const size_t kFrameSize = sizeof(virtio_net_hdr_t) + kL1EthHdrLen + kVirtioMtu;
// Whole frames that fit in one page; frames never straddle a page boundary.
const size_t kFramesInBuf = PAGE_SIZE / kFrameSize;
// Enough buffers to back kBacklog frames for each of the rx and tx rings,
// rounded up to a whole number of buffers.
const size_t kNumIoBufs = fbl::round_up(kBacklog * 2, kFramesInBuf) / kFramesInBuf;

// Ring indices: receive queue is virtqueue 0, transmit queue is virtqueue 1.
const uint16_t kRxId = 0u;
const uint16_t kTxId = 1u;

// Strictly for convenience...
57typedef struct vring_desc desc_t; 58 59// Device bridge helpers 60void virtio_net_unbind(void* ctx) { 61 virtio::EthernetDevice* eth = static_cast<virtio::EthernetDevice*>(ctx); 62 eth->Unbind(); 63} 64 65void virtio_net_release(void* ctx) { 66 fbl::unique_ptr<virtio::EthernetDevice> eth(static_cast<virtio::EthernetDevice*>(ctx)); 67 eth->Release(); 68} 69 70zx_protocol_device_t kDeviceOps = { 71 DEVICE_OPS_VERSION, 72 nullptr, // get_protocol 73 nullptr, // open 74 nullptr, // openat 75 nullptr, // close 76 virtio_net_unbind, 77 virtio_net_release, 78 nullptr, // read 79 nullptr, // write 80 nullptr, // get_size 81 nullptr, // ioctl 82 nullptr, // suspend 83 nullptr, // resume 84 nullptr, // rxrpc 85 nullptr, // rxmsg 86}; 87 88// Protocol bridge helpers 89zx_status_t virtio_net_query(void* ctx, uint32_t options, ethmac_info_t* info) { 90 virtio::EthernetDevice* eth = static_cast<virtio::EthernetDevice*>(ctx); 91 return eth->Query(options, info); 92} 93 94void virtio_net_stop(void* ctx) { 95 virtio::EthernetDevice* eth = static_cast<virtio::EthernetDevice*>(ctx); 96 eth->Stop(); 97} 98 99zx_status_t virtio_net_start(void* ctx, ethmac_ifc_t* ifc, void* cookie) { 100 virtio::EthernetDevice* eth = static_cast<virtio::EthernetDevice*>(ctx); 101 return eth->Start(ifc, cookie); 102} 103 104zx_status_t virtio_net_queue_tx(void* ctx, uint32_t options, ethmac_netbuf_t* netbuf) { 105 virtio::EthernetDevice* eth = static_cast<virtio::EthernetDevice*>(ctx); 106 return eth->QueueTx(options, netbuf); 107} 108 109static zx_status_t virtio_set_param(void* ctx, uint32_t param, int32_t value, void* data) { 110 return ZX_ERR_NOT_SUPPORTED; 111} 112 113ethmac_protocol_ops_t kProtoOps = { 114 virtio_net_query, 115 virtio_net_stop, 116 virtio_net_start, 117 virtio_net_queue_tx, 118 virtio_set_param, 119 NULL, // get_bti not implemented because we don't have FEATURE_DMA 120}; 121 122// I/O buffer helpers 123zx_status_t InitBuffers(const zx::bti& bti, fbl::unique_ptr<io_buffer_t[]>* 
out) { 124 zx_status_t rc; 125 fbl::AllocChecker ac; 126 fbl::unique_ptr<io_buffer_t[]> bufs(new (&ac) io_buffer_t[kNumIoBufs]); 127 if (!ac.check()) { 128 zxlogf(ERROR, "out of memory!\n"); 129 return ZX_ERR_NO_MEMORY; 130 } 131 memset(bufs.get(), 0, sizeof(io_buffer_t) * kNumIoBufs); 132 size_t buf_size = kFrameSize * kFramesInBuf; 133 for (uint16_t id = 0; id < kNumIoBufs; ++id) { 134 if ((rc = io_buffer_init(&bufs[id], bti.get(), buf_size, 135 IO_BUFFER_RW | IO_BUFFER_CONTIG)) != ZX_OK) { 136 zxlogf(ERROR, "failed to allocate I/O buffers: %s\n", zx_status_get_string(rc)); 137 return rc; 138 } 139 } 140 *out = fbl::move(bufs); 141 return ZX_OK; 142} 143 144void ReleaseBuffers(fbl::unique_ptr<io_buffer_t[]> bufs) { 145 if (!bufs) { 146 return; 147 } 148 for (size_t i = 0; i < kNumIoBufs; ++i) { 149 if (io_buffer_is_valid(&bufs[i])) { 150 io_buffer_release(&bufs[i]); 151 } 152 } 153} 154 155// Frame access helpers 156zx_off_t GetFrame(io_buffer_t** bufs, uint16_t ring_id, uint16_t desc_id) { 157 uint16_t i = static_cast<uint16_t>(desc_id + ring_id * kBacklog); 158 *bufs = &((*bufs)[i / kFramesInBuf]); 159 return (i % kFramesInBuf) * kFrameSize; 160} 161 162void* GetFrameVirt(io_buffer_t* bufs, uint16_t ring_id, uint16_t desc_id) { 163 zx_off_t offset = GetFrame(&bufs, ring_id, desc_id); 164 uintptr_t vaddr = reinterpret_cast<uintptr_t>(io_buffer_virt(bufs)); 165 return reinterpret_cast<void*>(vaddr + offset); 166} 167 168zx_paddr_t GetFramePhys(io_buffer_t* bufs, uint16_t ring_id, uint16_t desc_id) { 169 zx_off_t offset = GetFrame(&bufs, ring_id, desc_id); 170 return io_buffer_phys(bufs) + offset; 171} 172 173virtio_net_hdr_t* GetFrameHdr(io_buffer_t* bufs, uint16_t ring_id, uint16_t desc_id) { 174 return reinterpret_cast<virtio_net_hdr_t*>(GetFrameVirt(bufs, ring_id, desc_id)); 175} 176 177uint8_t* GetFrameData(io_buffer_t* bufs, uint16_t ring_id, uint16_t desc_id, size_t hdr_size) { 178 uintptr_t vaddr = reinterpret_cast<uintptr_t>(GetFrameHdr(bufs, ring_id, 
desc_id)); 179 return reinterpret_cast<uint8_t*>(vaddr + hdr_size); 180} 181 182} // namespace 183 184EthernetDevice::EthernetDevice(zx_device_t* bus_device, zx::bti bti, fbl::unique_ptr<Backend> backend) 185 : Device(bus_device, fbl::move(bti), fbl::move(backend)), rx_(this), tx_(this), bufs_(nullptr), 186 unkicked_(0), ifc_(nullptr), cookie_(nullptr) { 187} 188 189EthernetDevice::~EthernetDevice() { 190 LTRACE_ENTRY; 191} 192 193zx_status_t EthernetDevice::Init() { 194 LTRACE_ENTRY; 195 zx_status_t rc; 196 if (mtx_init(&state_lock_, mtx_plain) != thrd_success || 197 mtx_init(&tx_lock_, mtx_plain) != thrd_success) { 198 return ZX_ERR_NO_RESOURCES; 199 } 200 fbl::AutoLock lock(&state_lock_); 201 202 // Reset the device and read our configuration 203 DeviceReset(); 204 CopyDeviceConfig(&config_, sizeof(config_)); 205 LTRACEF("mac %02x:%02x:%02x:%02x:%02x:%02x\n", config_.mac[0], config_.mac[1], config_.mac[2], 206 config_.mac[3], config_.mac[4], config_.mac[5]); 207 LTRACEF("status %u\n", config_.status); 208 LTRACEF("max_virtqueue_pairs %u\n", config_.max_virtqueue_pairs); 209 210 // Ack and set the driver status bit 211 DriverStatusAck(); 212 213 virtio_hdr_len_ = sizeof(virtio_net_hdr_t); 214 if (DeviceFeatureSupported(VIRTIO_F_VERSION_1)) { 215 DriverFeatureAck(VIRTIO_F_VERSION_1); 216 } else { 217 // 5.1.6.1 Legacy Interface: Device Operation 218 // 219 // The legacy driver only presented num_buffers in the struct 220 // virtio_net_hdr when VIRTIO_NET_F_MRG_RXBUF was negotiated; without 221 // that feature the structure was 2 bytes shorter. 222 virtio_hdr_len_ -= 2; 223 } 224 225 // TODO(aarongreen): Check additional features bits and ack/nak them 226 rc = DeviceStatusFeaturesOk(); 227 if (rc != ZX_OK) { 228 zxlogf(ERROR, "%s: Feature negotiation failed (%d)\n", tag(), rc); 229 return rc; 230 } 231 232 // Plan to clean up unless everything goes right. 233 auto cleanup = fbl::MakeAutoCall([this]() { Release(); }); 234 235 // Allocate I/O buffers and virtqueues. 
236 uint16_t num_descs = static_cast<uint16_t>(kBacklog & 0xffff); 237 if ((rc = InitBuffers(bti_, &bufs_)) != ZX_OK || (rc = rx_.Init(kRxId, num_descs)) != ZX_OK || 238 (rc = tx_.Init(kTxId, num_descs)) != ZX_OK) { 239 zxlogf(ERROR, "failed to allocate virtqueue: %s\n", zx_status_get_string(rc)); 240 return rc; 241 } 242 243 // Associate the I/O buffers with the virtqueue descriptors 244 desc_t* desc = nullptr; 245 uint16_t id; 246 247 // For rx buffers, we queue a bunch of "reads" from the network that 248 // complete when packets arrive. 249 for (uint16_t i = 0; i < num_descs; ++i) { 250 desc = rx_.AllocDescChain(1, &id); 251 desc->addr = GetFramePhys(bufs_.get(), kRxId, id); 252 desc->len = kFrameSize; 253 desc->flags |= VRING_DESC_F_WRITE; 254 LTRACE_DO(virtio_dump_desc(desc)); 255 rx_.SubmitChain(id); 256 } 257 258 // For tx buffers, we hold onto them until we need to send a packet. 259 for (uint16_t id = 0; id < num_descs; ++id) { 260 desc = tx_.DescFromIndex(id); 261 desc->addr = GetFramePhys(bufs_.get(), kTxId, id); 262 desc->len = 0; 263 desc->flags &= static_cast<uint16_t>(~VRING_DESC_F_WRITE); 264 LTRACE_DO(virtio_dump_desc(desc)); 265 } 266 267 // Start the interrupt thread and set the driver OK status 268 StartIrqThread(); 269 270 // Initialize the zx_device and publish us 271 device_add_args_t args; 272 memset(&args, 0, sizeof(args)); 273 args.version = DEVICE_ADD_ARGS_VERSION; 274 args.name = "virtio-net"; 275 args.ctx = this; 276 args.ops = &kDeviceOps; 277 args.proto_id = ZX_PROTOCOL_ETHERNET_IMPL; 278 args.proto_ops = &kProtoOps; 279 if ((rc = device_add(bus_device_, &args, &device_)) != ZX_OK) { 280 zxlogf(ERROR, "failed to add device: %s\n", zx_status_get_string(rc)); 281 return rc; 282 } 283 // Give the rx buffers to the host 284 rx_.Kick(); 285 286 // Woohoo! Driver should be ready. 
287 cleanup.cancel(); 288 DriverStatusOk(); 289 return ZX_OK; 290} 291 292void EthernetDevice::Release() { 293 LTRACE_ENTRY; 294 fbl::AutoLock lock(&state_lock_); 295 ReleaseLocked(); 296} 297 298void EthernetDevice::ReleaseLocked() { 299 ifc_ = nullptr; 300 ReleaseBuffers(fbl::move(bufs_)); 301 Device::Release(); 302} 303 304void EthernetDevice::IrqRingUpdate() { 305 LTRACE_ENTRY; 306 // Lock to prevent changes to ifc_. 307 { 308 fbl::AutoLock lock(&state_lock_); 309 if (!ifc_) { 310 return; 311 } 312 // Ring::IrqRingUpdate will call this lambda on each rx buffer filled by 313 // the underlying device since the last IRQ. 314 // Thread safety analysis is explicitly disabled as clang isn't able to determine that the 315 // state_lock_ is held when the lambda invoked. 316 rx_.IrqRingUpdate([this](vring_used_elem* used_elem) TA_NO_THREAD_SAFETY_ANALYSIS { 317 uint16_t id = static_cast<uint16_t>(used_elem->id & 0xffff); 318 desc_t* desc = rx_.DescFromIndex(id); 319 320 // Transitional driver does not merge rx buffers. 321 assert(used_elem->len < desc->len); 322 uint8_t* data = GetFrameData(bufs_.get(), kRxId, id, virtio_hdr_len_); 323 size_t len = used_elem->len - virtio_hdr_len_; 324 LTRACEF("Receiving %zu bytes:\n", len); 325 LTRACE_DO(hexdump8_ex(data, len, 0)); 326 327 // Pass the data up the stack to the generic Ethernet driver 328 ifc_->recv(cookie_, data, len, 0); 329 assert((desc->flags & VRING_DESC_F_NEXT) == 0); 330 LTRACE_DO(virtio_dump_desc(desc)); 331 rx_.FreeDesc(id); 332 }); 333 } 334 335 // Now recycle the rx buffers. As in Init(), this means queuing a bunch of 336 // "reads" from the network that will complete when packets arrive. 337 desc_t* desc = nullptr; 338 uint16_t id; 339 bool need_kick = false; 340 while ((desc = rx_.AllocDescChain(1, &id))) { 341 desc->len = kFrameSize; 342 rx_.SubmitChain(id); 343 need_kick = true; 344 } 345 346 // If we have re-queued any rx buffers, poke the virtqueue to pick them up. 
347 if (need_kick) { 348 rx_.Kick(); 349 } 350} 351 352void EthernetDevice::IrqConfigChange() { 353 LTRACE_ENTRY; 354 fbl::AutoLock lock(&state_lock_); 355 if (!ifc_) { 356 return; 357 } 358 359 // Re-read our configuration 360 CopyDeviceConfig(&config_, sizeof(config_)); 361 ifc_->status(cookie_, (config_.status & VIRTIO_NET_S_LINK_UP) ? ETH_STATUS_ONLINE : 0); 362} 363 364zx_status_t EthernetDevice::Query(uint32_t options, ethmac_info_t* info) { 365 LTRACE_ENTRY; 366 if (options) { 367 return ZX_ERR_INVALID_ARGS; 368 } 369 fbl::AutoLock lock(&state_lock_); 370 if (info) { 371 // TODO(aarongreen): Add info->features = GetFeatures(); 372 info->mtu = kVirtioMtu; 373 memcpy(info->mac, config_.mac, sizeof(info->mac)); 374 } 375 return ZX_OK; 376} 377 378void EthernetDevice::Stop() { 379 LTRACE_ENTRY; 380 fbl::AutoLock lock(&state_lock_); 381 ifc_ = nullptr; 382} 383 384zx_status_t EthernetDevice::Start(ethmac_ifc_t* ifc, void* cookie) { 385 LTRACE_ENTRY; 386 if (!ifc) { 387 return ZX_ERR_INVALID_ARGS; 388 } 389 fbl::AutoLock lock(&state_lock_); 390 if (!bufs_ || ifc_) { 391 return ZX_ERR_BAD_STATE; 392 } 393 ifc_ = ifc; 394 cookie_ = cookie; 395 ifc_->status(cookie_, (config_.status & VIRTIO_NET_S_LINK_UP) ? ETH_STATUS_ONLINE : 0); 396 return ZX_OK; 397} 398 399zx_status_t EthernetDevice::QueueTx(uint32_t options, ethmac_netbuf_t* netbuf) { 400 LTRACE_ENTRY; 401 void* data = netbuf->data; 402 size_t length = netbuf->len; 403 // First, validate the packet 404 if (!data || length > virtio_hdr_len_ + kVirtioMtu) { 405 LTRACEF("dropping packet; invalid packet\n"); 406 return ZX_ERR_INVALID_ARGS; 407 } 408 409 fbl::AutoLock lock(&tx_lock_); 410 411 // Flush outstanding descriptors. Ring::IrqRingUpdate will call this lambda 412 // on each sent tx_buffer, allowing us to reclaim them. 
413 auto flush = [this](vring_used_elem* used_elem) { 414 uint16_t id = static_cast<uint16_t>(used_elem->id & 0xffff); 415 desc_t* desc = tx_.DescFromIndex(id); 416 assert((desc->flags & VRING_DESC_F_NEXT) == 0); 417 LTRACE_DO(virtio_dump_desc(desc)); 418 tx_.FreeDesc(id); 419 }; 420 421 // Grab a free descriptor 422 uint16_t id; 423 desc_t* desc = tx_.AllocDescChain(1, &id); 424 if (!desc) { 425 tx_.IrqRingUpdate(flush); 426 desc = tx_.AllocDescChain(1, &id); 427 } 428 if (!desc) { 429 LTRACEF("dropping packet; out of descriptors\n"); 430 return ZX_ERR_NO_RESOURCES; 431 } 432 433 // Add the data to be sent 434 virtio_net_hdr_t* tx_hdr = GetFrameHdr(bufs_.get(), kTxId, id); 435 memset(tx_hdr, 0, virtio_hdr_len_); 436 437 // 5.1.6.2.1 Driver Requirements: Packet Transmission 438 // 439 // The driver MUST set num_buffers to zero. 440 // 441 // Implementation note: This field doesn't exist if neither 442 // |VIRTIO_F_VERSION_1| or |VIRTIO_F_MRG_RXBUF| have been negotiated. Since 443 // this field will be part of the payload without these features we elide 444 // the check as we know the memory is valid and will soon be overwritten 445 // with packet data. 446 tx_hdr->num_buffers = 0; 447 448 // If VIRTIO_NET_F_CSUM is not negotiated, the driver MUST set flags to 449 // zero and SHOULD supply a fully checksummed packet to the device. 450 tx_hdr->flags = 0; 451 452 // If none of the VIRTIO_NET_F_HOST_TSO4, TSO6 or UFO options have been 453 // negotiated, the driver MUST set gso_type to VIRTIO_NET_HDR_GSO_NONE. 454 tx_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; 455 456 void* tx_buf = GetFrameData(bufs_.get(), kTxId, id, virtio_hdr_len_); 457 memcpy(tx_buf, data, length); 458 desc->len = static_cast<uint32_t>(virtio_hdr_len_ + length); 459 460 // Submit the descriptor and notify the back-end. 
461 LTRACE_DO(virtio_dump_desc(desc)); 462 LTRACEF("Sending %zu bytes:\n", length); 463 LTRACE_DO(hexdump8_ex(tx_buf, length, 0)); 464 tx_.SubmitChain(id); 465 ++unkicked_; 466 if ((options & ETHMAC_TX_OPT_MORE) == 0 || unkicked_ > kBacklog / 2) { 467 tx_.Kick(); 468 unkicked_ = 0; 469 } 470 return ZX_OK; 471} 472 473} // namespace virtio 474