Lines matching refs:priv (the leading number on each entry is its line number in the source file)

38 void gve_parse_device_option(struct gve_priv *priv,
60 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
68 dev_info(&priv->pdev->dev,
70 priv->queue_format = GVE_GQI_RDA_FORMAT;
75 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
83 dev_warn(&priv->pdev->dev,
91 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
99 dev_warn(&priv->pdev->dev,
107 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
115 dev_warn(&priv->pdev->dev,
123 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
131 dev_warn(&priv->pdev->dev,
139 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
148 dev_warn(&priv->pdev->dev,
157 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
166 dev_warn(&priv->pdev->dev,
174 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
182 dev_warn(&priv->pdev->dev,
190 priv->default_min_ring_size = true;
196 dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
203 gve_process_device_options(struct gve_priv *priv,
224 dev_err(&priv->dev->dev,
229 gve_parse_device_option(priv, descriptor, dev_opt,
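
The matches above, from gve_parse_device_option() and gve_process_device_options(), show a TLV-style walk: each option packed after the device descriptor is validated before it is applied, hence the repeated GVE_DEVICE_OPTION_ERROR_FMT warnings for every option type. A minimal standalone sketch of that kind of bounds-checked option walk follows; the struct layout and helper names are assumptions for illustration, not the driver's actual definitions (which use big-endian fields):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical TLV-style option header modelled on the parse/warn
 * pattern in the listing; the field layout is an assumption. */
struct dev_option {
    uint16_t option_id;
    uint16_t option_length;    /* bytes of payload that follow */
};

/* Walk options packed after the descriptor header, refusing to read
 * past the end of the buffer before handing each option to a parser. */
static void process_options(const uint8_t *buf, size_t len)
{
    size_t off = 0;
    struct dev_option opt;

    while (off + sizeof(opt) <= len) {
        memcpy(&opt, buf + off, sizeof(opt));    /* may be unaligned */
        size_t next = off + sizeof(opt) + opt.option_length;
        if (next > len) {
            fprintf(stderr, "option overruns descriptor\n");
            return;
        }
        printf("option 0x%x, %u payload bytes\n",
               opt.option_id, opt.option_length);
        off = next;
    }
}

int main(void)
{
    /* one option: id 0x1 with a 4-byte payload (native-endian host) */
    uint8_t buf[] = { 0x01, 0x00, 0x04, 0x00, 1, 2, 3, 4 };
    process_options(buf, sizeof(buf));
    return 0;
}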
240 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
242 priv->adminq_pool = dma_pool_create("adminq_pool", dev,
244 if (unlikely(!priv->adminq_pool))
246 priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
247 &priv->adminq_bus_addr);
248 if (unlikely(!priv->adminq)) {
249 dma_pool_destroy(priv->adminq_pool);
253 priv->adminq_mask =
255 priv->adminq_prod_cnt = 0;
256 priv->adminq_cmd_fail = 0;
257 priv->adminq_timeouts = 0;
258 priv->adminq_describe_device_cnt = 0;
259 priv->adminq_cfg_device_resources_cnt = 0;
260 priv->adminq_register_page_list_cnt = 0;
261 priv->adminq_unregister_page_list_cnt = 0;
262 priv->adminq_create_tx_queue_cnt = 0;
263 priv->adminq_create_rx_queue_cnt = 0;
264 priv->adminq_destroy_tx_queue_cnt = 0;
265 priv->adminq_destroy_rx_queue_cnt = 0;
266 priv->adminq_dcfg_device_resources_cnt = 0;
267 priv->adminq_set_driver_parameter_cnt = 0;
268 priv->adminq_report_stats_cnt = 0;
269 priv->adminq_report_link_speed_cnt = 0;
270 priv->adminq_get_ptype_map_cnt = 0;
273 if (priv->pdev->revision < 0x1) {
274 iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
275 &priv->reg_bar0->adminq_pfn);
278 &priv->reg_bar0->adminq_length);
280 iowrite32be(priv->adminq_bus_addr >> 32,
281 &priv->reg_bar0->adminq_base_address_hi);
283 iowrite32be(priv->adminq_bus_addr,
284 &priv->reg_bar0->adminq_base_address_lo);
285 iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
287 gve_set_admin_queue_ok(priv);
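
Lines 240-287 cover gve_adminq_alloc(): the queue memory comes from a DMA pool, the per-command counters are zeroed, and the queue base is handed to the device in one of two ways depending on hardware revision. Pre-0x1 devices take a page frame number in adminq_pfn; newer ones take the 64-bit bus address split across adminq_base_address_hi/lo. A standalone sketch of that split, with a printing stub in place of iowrite32be():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Stand-in for iowrite32be(): prints the value the driver would
 * byte-swap to big-endian and write to the named register. */
static void reg_write32(const char *reg, uint32_t val)
{
    printf("%s <- 0x%08" PRIx32 "\n", reg, val);
}

/* Mirrors the two paths at lines 273-284: revisions before 0x1 take a
 * page frame number; later revisions take the full 64-bit bus address
 * split across two 32-bit registers. */
static void program_adminq_base(uint64_t bus_addr, uint8_t revision)
{
    if (revision < 0x1) {
        reg_write32("adminq_pfn", (uint32_t)(bus_addr / PAGE_SIZE));
    } else {
        reg_write32("adminq_base_address_hi", (uint32_t)(bus_addr >> 32));
        reg_write32("adminq_base_address_lo", (uint32_t)bus_addr);
    }
}

int main(void)
{
    program_adminq_base(0x23456789000ull, 0);    /* PFN path */
    program_adminq_base(0x23456789000ull, 1);    /* hi/lo path */
    return 0;
}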
291 void gve_adminq_release(struct gve_priv *priv)
296 if (priv->pdev->revision < 0x1) {
297 iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
298 while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
309 iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
310 while (!(ioread32be(&priv->reg_bar0->device_status)
318 gve_clear_device_rings_ok(priv);
319 gve_clear_device_resources_ok(priv);
320 gve_clear_admin_queue_ok(priv);
323 void gve_adminq_free(struct device *dev, struct gve_priv *priv)
325 if (!gve_get_admin_queue_ok(priv))
327 gve_adminq_release(priv);
328 dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
329 dma_pool_destroy(priv->adminq_pool);
330 gve_clear_admin_queue_ok(priv);
333 static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
335 iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
338 static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
343 if (ioread32be(&priv->reg_bar0->adminq_event_counter)
352 static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
356 dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
357 priv->adminq_cmd_fail++;
363 dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
388 dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
396 static int gve_adminq_kick_and_wait(struct gve_priv *priv)
401 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
402 head = priv->adminq_prod_cnt;
404 gve_adminq_kick_cmd(priv, head);
405 if (!gve_adminq_wait_for_cmd(priv, head)) {
406 dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
407 priv->adminq_timeouts++;
415 cmd = &priv->adminq[i & priv->adminq_mask];
417 err = gve_adminq_parse_err(priv, status);
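
Lines 396-417 are the completion path, gve_adminq_kick_and_wait(): write the producer count to the doorbell, poll the device's event counter until it catches up, and on timeout bump adminq_timeouts so the caller can reset the queue; each completed slot is then run through gve_adminq_parse_err(). A self-contained sketch of the poll loop, with a toy device model in place of the real MMIO accessors (the retry count and the sleep-between-polls are assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy device model standing in for ioread32be/iowrite32be on the
 * BAR registers: the event counter trails the doorbell. */
static uint32_t g_doorbell, g_event_counter;

static void ring_doorbell(uint32_t prod_cnt) { g_doorbell = prod_cnt; }

static uint32_t read_event_counter(void)
{
    if (g_event_counter < g_doorbell)
        g_event_counter++;    /* device "consumes" one command per poll */
    return g_event_counter;
}

#define AQ_POLL_TRIES 500

/* The pattern from the listing: kick, then poll the event counter
 * until it reaches the producer count, or give up. */
static bool adminq_kick_and_wait(uint32_t prod_cnt)
{
    ring_doorbell(prod_cnt);
    for (int i = 0; i < AQ_POLL_TRIES; i++) {
        if (read_event_counter() == prod_cnt)
            return true;
        /* the real driver sleeps between polls */
    }
    return false;    /* timeout: caller bumps adminq_timeouts and resets */
}

int main(void)
{
    printf("completed: %d\n", adminq_kick_and_wait(3));
    return 0;
}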
429 static int gve_adminq_issue_cmd(struct gve_priv *priv,
436 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
439 if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
440 (tail & priv->adminq_mask)) {
444 err = gve_adminq_kick_and_wait(priv);
449 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
450 if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
451 (tail & priv->adminq_mask)) {
458 cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
459 priv->adminq_prod_cnt++;
466 priv->adminq_describe_device_cnt++;
469 priv->adminq_cfg_device_resources_cnt++;
472 priv->adminq_register_page_list_cnt++;
475 priv->adminq_unregister_page_list_cnt++;
478 priv->adminq_create_tx_queue_cnt++;
481 priv->adminq_create_rx_queue_cnt++;
484 priv->adminq_destroy_tx_queue_cnt++;
487 priv->adminq_destroy_rx_queue_cnt++;
490 priv->adminq_dcfg_device_resources_cnt++;
493 priv->adminq_set_driver_parameter_cnt++;
496 priv->adminq_report_stats_cnt++;
499 priv->adminq_report_link_speed_cnt++;
502 priv->adminq_get_ptype_map_cnt++;
505 priv->adminq_verify_driver_compatibility_cnt++;
508 dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
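
Lines 429-459 show gve_adminq_issue_cmd() treating the command array as a power-of-two ring: head is adminq_prod_cnt, tail is the device's event counter, and the ring is full when advancing the producer would land on the tail under adminq_mask (assumed here to be ring size minus one). When that happens the driver kicks and waits, then re-reads the tail before retrying. The index arithmetic in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Ring-full test from the issue path: with mask = size - 1 (size a
 * power of two), the ring is full when the next producer slot would
 * land on the consumer's slot. */
static bool adminq_full(uint32_t prod_cnt, uint32_t tail, uint32_t mask)
{
    return ((prod_cnt + 1) & mask) == (tail & mask);
}

int main(void)
{
    uint32_t mask = 16 - 1;    /* 16-entry ring */
    /* with 15 commands outstanding only the reserved empty slot is
     * left, so the ring reports full; at 14 it does not */
    printf("full at 15 ahead: %d\n", adminq_full(15, 0, mask)); /* 1 */
    printf("full at 14 ahead: %d\n", adminq_full(14, 0, mask)); /* 0 */
    return 0;
}

Keeping one slot empty is the classic way to distinguish a full ring from an empty one when only the masked head and tail are compared.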
519 static int gve_adminq_execute_cmd(struct gve_priv *priv,
525 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
526 head = priv->adminq_prod_cnt;
531 err = gve_adminq_issue_cmd(priv, cmd_orig);
535 return gve_adminq_kick_and_wait(priv);
546 int gve_adminq_configure_device_resources(struct gve_priv *priv,
562 .irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
565 .queue_format = priv->queue_format,
568 return gve_adminq_execute_cmd(priv, &cmd);
571 int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
578 return gve_adminq_execute_cmd(priv, &cmd);
581 static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
583 struct gve_tx_ring *tx = &priv->tx[queue_index];
594 .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
597 if (gve_is_gqi(priv)) {
598 u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
605 if (priv->queue_format == GVE_DQO_RDA_FORMAT)
613 cpu_to_be16(priv->tx_desc_cnt);
616 return gve_adminq_issue_cmd(priv, &cmd);
619 int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
625 err = gve_adminq_create_tx_queue(priv, i);
630 return gve_adminq_kick_and_wait(priv);
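
gve_adminq_create_tx_queues() (lines 619-630) batches: each per-queue command is only enqueued via gve_adminq_issue_cmd(), and a single gve_adminq_kick_and_wait() after the loop rings the doorbell once for the whole batch; the RX path at lines 700-711 has the same shape. A counting sketch of that pattern, with hypothetical stand-ins for the two helpers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for gve_adminq_issue_cmd() and
 * gve_adminq_kick_and_wait(); they only count calls so the
 * one-doorbell-per-batch shape is visible. */
static int doorbell_kicks, cmds_issued;

static int issue_create_tx_queue_cmd(uint32_t queue_index)
{
    (void)queue_index;
    cmds_issued++;
    return 0;
}

static int kick_and_wait(void)
{
    doorbell_kicks++;
    return 0;
}

/* Batch pattern from lines 619-630: enqueue every per-queue command,
 * then pay for a single doorbell write and completion wait. */
static int create_tx_queues(uint32_t start_id, uint32_t num_queues)
{
    for (uint32_t i = start_id; i < start_id + num_queues; i++) {
        int err = issue_create_tx_queue_cmd(i);
        if (err)
            return err;    /* bail before kicking */
    }
    return kick_and_wait();
}

int main(void)
{
    create_tx_queues(0, 8);
    printf("%d commands, %d doorbell kick(s)\n", cmds_issued, doorbell_kicks);
    return 0;
}

By contrast, the single-queue variants at lines 692-697 and 765-770 call gve_adminq_execute_cmd(), which issues and waits in one step.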
633 static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
637 struct gve_rx_ring *rx = &priv->rx[queue_index];
645 .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
648 if (gve_is_gqi(priv)) {
649 u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
662 if (priv->queue_format == GVE_DQO_RDA_FORMAT)
672 cpu_to_be16(priv->data_buffer_size_dqo);
674 cpu_to_be16(priv->rx_desc_cnt);
676 !!(priv->dev->features & NETIF_F_LRO);
677 if (priv->header_split_enabled)
679 cpu_to_be16(priv->header_buf_size);
683 static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
687 gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
688 return gve_adminq_issue_cmd(priv, &cmd);
692 int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
696 gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
697 return gve_adminq_execute_cmd(priv, &cmd);
700 int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
706 err = gve_adminq_create_rx_queue(priv, i);
711 return gve_adminq_kick_and_wait(priv);
714 static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
725 err = gve_adminq_issue_cmd(priv, &cmd);
732 int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
738 err = gve_adminq_destroy_tx_queue(priv, i);
743 return gve_adminq_kick_and_wait(priv);
756 static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
761 return gve_adminq_issue_cmd(priv, &cmd);
765 int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
770 return gve_adminq_execute_cmd(priv, &cmd);
773 int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
779 err = gve_adminq_destroy_rx_queue(priv, i);
784 return gve_adminq_kick_and_wait(priv);
787 static void gve_set_default_desc_cnt(struct gve_priv *priv,
790 priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
791 priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
794 priv->max_tx_desc_cnt = priv->tx_desc_cnt;
795 priv->max_rx_desc_cnt = priv->rx_desc_cnt;
796 priv->min_tx_desc_cnt = priv->tx_desc_cnt;
797 priv->min_rx_desc_cnt = priv->rx_desc_cnt;
800 static void gve_enable_supported_features(struct gve_priv *priv,
813 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
817 dev_info(&priv->pdev->dev,
819 priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
824 priv->tx_pages_per_qpl =
826 if (priv->tx_pages_per_qpl == 0)
827 priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
832 priv->max_rx_buffer_size =
834 priv->header_buf_size =
836 dev_info(&priv->pdev->dev,
838 priv->max_rx_buffer_size, priv->header_buf_size);
844 priv->modify_ring_size_enabled = true;
847 if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
848 priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
849 priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
851 if (priv->default_min_ring_size) {
853 priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
854 priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
856 priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
857 priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
862 int gve_adminq_describe_device(struct gve_priv *priv)
880 descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
892 err = gve_adminq_execute_cmd(priv, &cmd);
896 err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
909 priv->queue_format = GVE_DQO_RDA_FORMAT;
910 dev_info(&priv->pdev->dev,
915 priv->queue_format = GVE_DQO_QPL_FORMAT;
919 priv->queue_format = GVE_GQI_RDA_FORMAT;
920 dev_info(&priv->pdev->dev,
924 } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
925 dev_info(&priv->pdev->dev,
928 priv->queue_format = GVE_GQI_QPL_FORMAT;
932 dev_info(&priv->pdev->dev,
937 gve_set_default_desc_cnt(priv, descriptor);
940 if (!gve_is_gqi(priv))
941 priv->dev->hw_features |= NETIF_F_LRO;
943 priv->max_registered_pages =
947 dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
951 priv->dev->max_mtu = mtu;
952 priv->num_event_counters = be16_to_cpu(descriptor->counters);
953 eth_hw_addr_set(priv->dev, descriptor->mac);
955 dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
956 priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
957 priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
959 gve_enable_supported_features(priv, supported_features_mask,
964 dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
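
Lines 862-964 show the lifetime of the device descriptor in gve_adminq_describe_device(): it is carved out of the admin-queue DMA pool, filled in by the device when the describe command executes, parsed into priv fields (ring sizes, MTU, MAC, queue counts), and freed on every exit path. A sketch of that single-exit cleanup shape, with hypothetical pool_alloc/pool_free standing in for the dma_pool_alloc/dma_pool_free calls in the listing:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-ins for dma_pool_alloc()/dma_pool_free() and for
 * executing the describe command. */
static void *pool_alloc(size_t n) { return calloc(1, n); }
static void pool_free(void *p) { free(p); }
static int execute_describe_cmd(void *desc) { (void)desc; return 0; }

struct descriptor { unsigned short mtu, counters; /* ... */ };

/* Single-exit pattern from lines 880-964: allocate, execute, parse,
 * and free the descriptor on success and failure alike. */
static int describe_device(void)
{
    struct descriptor *desc = pool_alloc(sizeof(*desc));
    int err;

    if (!desc)
        return -ENOMEM;

    err = execute_describe_cmd(desc);
    if (err)
        goto free_descriptor;

    /* parse fields out of *desc into driver state here */

free_descriptor:
    pool_free(desc);
    return err;
}

int main(void) { return describe_device(); }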
968 int gve_adminq_register_page_list(struct gve_priv *priv,
971 struct device *hdev = &priv->pdev->dev;
996 err = gve_adminq_execute_cmd(priv, &cmd);
1001 int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
1011 return gve_adminq_execute_cmd(priv, &cmd);
1014 int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
1025 return gve_adminq_execute_cmd(priv, &cmd);
1028 int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
1041 return gve_adminq_execute_cmd(priv, &cmd);
1044 int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
1057 return gve_adminq_execute_cmd(priv, &cmd);
1060 int gve_adminq_report_link_speed(struct gve_priv *priv)
1068 dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
1079 err = gve_adminq_execute_cmd(priv, &gvnic_cmd);
1081 priv->link_speed = be64_to_cpu(*link_speed_region);
1082 dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
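
gve_adminq_report_link_speed() (lines 1060-1082) shows the out-of-band result pattern: a coherent DMA buffer is allocated, its bus address is passed in the command, and after gve_adminq_execute_cmd() the device-written big-endian u64 is converted with be64_to_cpu() before the buffer is freed. The ptype-map path at lines 1087-1120 follows the same shape with a larger buffer. A sketch of just the endian conversion, with an ordinary array standing in for the coherent buffer:

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for be64_to_cpu(): assemble the value byte by
 * byte so the result is correct on any host endianness. */
static uint64_t be64_to_host(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++)
        v = (v << 8) | p[i];
    return v;
}

int main(void)
{
    /* pretend the device wrote 10000000000 (10 Gbit/s) big-endian
     * into the coherent buffer the command pointed at */
    uint8_t dma_buf[8] = { 0x00, 0x00, 0x00, 0x02,
                           0x54, 0x0b, 0xe4, 0x00 };
    printf("link speed: %llu bit/s\n",
           (unsigned long long)be64_to_host(dma_buf));
    return 0;
}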
1087 int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
1097 ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
1108 err = gve_adminq_execute_cmd(priv, &cmd);
1120 dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,