Results restricted to /barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/net/mlx4/

Lines Matching defs:eq

108 static void eq_set_ci(struct mlx4_eq *eq, int req_not) {
109 __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
110 req_not << 31), eq->doorbell);
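eq_set_ci() above publishes the consumer index to the hardware through a single 32-bit doorbell write. The following is a minimal standalone sketch of how that word appears to be packed, based on the fragment above; the helper name pack_eq_doorbell is illustrative, not part of the driver, and the real code additionally converts the value to big-endian (cpu_to_be32) before the MMIO write.

#include <stdint.h>

/* Low 24 bits: consumer index (the ring wraps, so only 24 bits are kept);
 * bit 31: request-notification flag, asking the HCA to raise an interrupt
 * on the next event. */
static inline uint32_t pack_eq_doorbell(uint32_t cons_index, int req_not)
{
    return (cons_index & 0xffffffu) | ((req_not ? 1u : 0u) << 31);
}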
119 static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
121 /* (entry & (eq->nent - 1)) gives us a cyclic array */
122 unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
128 return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
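get_eqe() treats the event queue as a cyclic array: because eq->nent is a power of two, masking with (nent - 1) wraps the entry number without a division, and the resulting byte offset is split into a page index (offset / PAGE_SIZE) and an in-page offset, with the eqe_factor term apparently skipping the unused first half of a 64-byte EQE. A small standalone sketch of the same indexing arithmetic; names are illustrative only.

#include <stddef.h>
#include <stdint.h>

/* Cyclic-array indexing as in get_eqe(): with a power-of-two ring size,
 * (entry & (nent - 1)) equals entry % nent but avoids the division. */
static inline size_t eq_ring_byte_offset(uint32_t entry, uint32_t nent,
                                         size_t entry_size)
{
    return (size_t)(entry & (nent - 1)) * entry_size;
}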
131 static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
133 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
134 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
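next_eqe_sw() decides whether the entry at cons_index already belongs to software by comparing two parities: the hardware owner bit in the EQE and the lap parity of the consumer index. A hedged restatement of that test with standalone types, not the driver's.

#include <stdbool.h>
#include <stdint.h>

/* The HCA flips the owner bit each time it wraps the ring, and
 * (cons_index & nent) flips each time software wraps it (nent is a power
 * of two), so the entry is valid for software exactly when both agree. */
static inline bool eqe_owned_by_sw(uint8_t owner_byte,
                                   uint32_t cons_index, uint32_t nent)
{
    bool hw_lap_parity = (owner_byte & 0x80) != 0;
    bool sw_lap_parity = (cons_index & nent) != 0;
    return hw_lap_parity == sw_lap_parity;
}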
454 static int mlx4_eq_int(struct mlx4_priv *priv, struct mlx4_eq *eq)
471 while ((eqe = next_eqe_sw(eq, priv->dev.caps.eqe_factor))) {
504 eq->eqn, eq->cons_index, ret);
535 eq->eqn, eq->cons_index, ret);
630 eq->eqn, eq->cons_index, ret);
646 MLX4_DEBUG( "EQ overrun on EQN %d\n", eq->eqn);
725 eqe->type, eqe->subtype, eq->eqn,
726 eq->cons_index, eqe->owner, eq->nent,
729 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
751 eqe->type, eqe->subtype, eq->eqn,
752 eq->cons_index, eqe->owner, eq->nent,
754 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
765 eqe->type, eqe->subtype, eq->eqn,
766 eq->cons_index, eqe->owner, eq->nent,
769 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
773 ++eq->cons_index;
785 eq_set_ci(eq, 0);
790 eq_set_ci(eq, 1);
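The mlx4_eq_int() fragments above outline the polling loop: drain every software-owned EQE, bump the consumer index per entry, and re-arm the doorbell at the end with eq_set_ci(eq, 1); the eq_set_ci(eq, 0) call appears to be the periodic doorbell update without re-arming that guards against EQ overrun. Below is a compilable outline of that control flow, with stub types and helpers standing in for the driver's.

#include <stdint.h>

struct poll_eqe { uint8_t type, subtype, owner; };
struct poll_eq  { uint32_t cons_index; };

/* Stubs standing in for next_eqe_sw(), the per-event dispatch, and
 * eq_set_ci(); they only illustrate the control flow. */
static struct poll_eqe *poll_next_eqe_sw(struct poll_eq *eq)
{ (void)eq; return 0; }
static void poll_dispatch(struct poll_eq *eq, struct poll_eqe *eqe)
{ (void)eq; (void)eqe; }
static void poll_set_ci(struct poll_eq *eq, int req_not)
{ (void)eq; (void)req_not; }

static int poll_eq_int(struct poll_eq *eq)
{
    struct poll_eqe *eqe;
    int work = 0;

    while ((eqe = poll_next_eqe_sw(eq)) != 0) {
        poll_dispatch(eq, eqe);   /* completion, async event, ... */
        ++eq->cons_index;         /* consume the entry */
        work = 1;
        /* the driver also rings the doorbell un-armed now and then,
         * i.e. poll_set_ci(eq, 0), to keep the EQ from overrunning */
    }

    poll_set_ci(eq, 1);           /* re-arm: interrupt on next event */
    return work;
}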
805 work |= mlx4_eq_int(&priv->dev, &priv->eq_table.eq[i]);
812 struct mlx4_eq *eq = eq_ptr;
813 struct mlx4_dev *dev = eq->dev;
815 mlx4_eq_int(&priv->dev, eq);
879 static void *mlx4_get_eq_uar(struct mlx4_priv *priv, struct mlx4_eq *eq) {
882 index = eq->eqn / 4 - priv->dev.caps.reserved_eqs / 4;
886 + ((eq->eqn / 4) << PAGE_SHIFT);
889 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
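mlx4_get_eq_uar() computes where in the mapped UAR space this EQ's doorbell lives: one UAR page serves four EQs, the page index is eqn / 4 adjusted by the reserved EQs, and the doorbell sits at byte 0x800 + 8 * (eqn % 4) inside that page. A standalone sketch of the arithmetic only; the real function also maps the UAR page on first use, and the names here are illustrative.

#include <stddef.h>

/* Page index into the uar_map[] table, mirroring mlx4_get_eq_uar(). */
static size_t eq_uar_index(int eqn, int reserved_eqs)
{
    return (size_t)(eqn / 4 - reserved_eqs / 4);
}

/* Byte offset of this EQ's doorbell within its UAR page. */
static size_t eq_doorbell_offset(int eqn)
{
    return 0x800u + 8u * (size_t)(eqn % 4);
}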
905 struct mlx4_eq *eq) {
915 eq->priv = priv;
916 eq->nent = roundup_pow_of_two(max(nent, 2));
919 eq->nent * (MLX4_EQ_ENTRY_SIZE << priv->dev.caps.eqe_factor))
922 eq->page_list = malloc(npages * sizeof *eq->page_list);
923 if (!eq->page_list)
927 eq->page_list[i].buf = NULL;
939 eq->page_list[i].buf = dma_alloc(BASE_PAGE_SIZE, &t);
940 if (!eq->page_list[i].buf)
944 eq->page_list[i].map = t;
946 memset(eq->page_list[i].buf, 0, BASE_PAGE_SIZE);
949 eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
950 if (eq->eqn == -1)
953 eq->doorbell = mlx4_get_eq_uar(priv, eq);
954 if (!eq->doorbell) {
959 err = mlx4_mtt_init(&priv->dev, npages, PAGE_SHIFT, &eq->mtt);
963 err = mlx4_write_mtt(&priv->dev, &eq->mtt, 0, npages, dma_list);
970 eq_context->log_eq_size = ilog2(eq->nent);
974 /*printf("mtt_addr: %lx\n", mlx4_mtt_addr(&priv->dev, &eq->mtt));
975 printf("off: %d\n", eq->mtt.offset);
978 mtt_addr = mlx4_mtt_addr(&priv->dev, &eq->mtt);
982 err = mlx4_SW2HW_EQ(priv, mailbox, eq->eqn);
991 eq->cons_index = 0;
996 err_out_free_mtt: /*mlx4_mtt_cleanup(&priv->dev, &eq->mtt);*/
998 err_out_free_eq: /*mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn,
1002 if (eq->page_list[i].buf)
1004 eq->page_list[i].buf, eq->page_list[i].map);*/
1008 err_out_free: free(eq->page_list);
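The mlx4_create_eq() fragments show the sizing step: the requested entry count is rounded up to a power of two (at least 2), each entry occupies MLX4_EQ_ENTRY_SIZE bytes, doubled when 64-byte EQEs are enabled via eqe_factor, and the total is split into whole pages that are then DMA-allocated one by one. A small sketch of that arithmetic; the entry-size and page-size constants here are assumptions for illustration.

#include <stddef.h>
#include <stdint.h>

#define SKETCH_EQ_ENTRY_SIZE 32u     /* assumed basic EQE size in bytes */
#define SKETCH_PAGE_SIZE     4096u   /* assumed page size */

static uint32_t sketch_roundup_pow2(uint32_t x)
{
    uint32_t r = 1;
    while (r < x)
        r <<= 1;
    return r;
}

/* Number of DMA pages needed for the ring, as in mlx4_create_eq():
 * roundup_pow_of_two(max(nent, 2)) entries of
 * (MLX4_EQ_ENTRY_SIZE << eqe_factor) bytes, rounded up to whole pages. */
static size_t sketch_eq_npages(uint32_t nent, unsigned eqe_factor)
{
    uint32_t entries = sketch_roundup_pow2(nent < 2 ? 2 : nent);
    size_t bytes = (size_t)entries * (SKETCH_EQ_ENTRY_SIZE << eqe_factor);
    return (bytes + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;
}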
1015 struct mlx4_eq *eq)
1022 int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << priv->dev.caps.eqe_factor) * eq->nent) / PAGE_SIZE;
1028 err = mlx4_HW2SW_EQ(&priv->dev, mailbox, eq->eqn);
1033 MLX4_DEBUG( "Dumping EQ context %02x:\n", eq->eqn);
1043 mlx4_mtt_cleanup(&priv->dev, &eq->mtt);
1046 eq->page_list[i].buf,
1047 eq->page_list[i].map);
1049 kfree(eq->page_list);
1050 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
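mlx4_free_eq() undoes the setup in reverse: the EQ is handed back to software ownership (HW2SW_EQ), its MTT translation is released, the DMA pages and the page-list array are freed, and the EQ number is returned to the bitmap allocator. The outline below uses placeholder helpers, not the driver's API, and only marks where each real call happens.

#include <stdlib.h>

struct td_eq_page { void *buf; };
struct td_eq { int eqn; int npages; struct td_eq_page *page_list; };

/* Placeholder helpers for the HW2SW_EQ command, MTT cleanup, DMA free and
 * the EQ-number bitmap. */
static void td_hw2sw_eq(struct td_eq *eq)    { (void)eq; }
static void td_mtt_cleanup(struct td_eq *eq) { (void)eq; }
static void td_dma_free(void *buf)           { free(buf); }
static void td_eqn_free(int eqn)             { (void)eqn; }

static void td_free_eq(struct td_eq *eq)
{
    int i;

    td_hw2sw_eq(eq);            /* hardware stops using the ring */
    td_mtt_cleanup(eq);         /* drop the address translation */
    for (i = 0; i < eq->npages; i++)
        if (eq->page_list[i].buf)
            td_dma_free(eq->page_list[i].buf);
    free(eq->page_list);        /* the page descriptor array itself */
    td_eqn_free(eq->eqn);       /* EQ number back to the allocator */
}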
1064 if (eq_table->eq[i].have_irq) {
1065 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
1066 eq_table->eq[i].have_irq = 0;
1077 free_irq(priv->eq_table.eq[vec].irq,
1078 &priv->eq_table.eq[vec]);
1104 priv->eq_table.eq = calloc(
1106 sizeof *priv->eq_table.eq);
1107 if (!priv->eq_table.eq)
1115 kfree(mlx4_priv(&priv->dev)->eq_table.eq);
1161 &priv->eq_table.eq[i]);
1171 &priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);
1184 &priv->eq_table.eq[i]);
1207 err = request_irq(priv->eq_table.eq[i].irq, mlx4_msi_x_interrupt, 0,
1208 eq_name, priv->eq_table.eq + i);
1212 priv->eq_table.eq[i].have_irq = 1;
1226 priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
1229 priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn, err);
1232 eq_set_ci(&priv->eq_table.eq[i], 1);
1238 &priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);*/
1243 mlx4_free_eq(&priv->dev, &priv->eq_table.eq[i]);
1264 priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
1269 mlx4_free_eq(&priv->dev, &priv->eq_table.eq[i]);
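Taken together, the mlx4_init_eq_table() fragments suggest the bring-up order: one EQ per completion vector plus a trailing EQ for asynchronous events, an MSI-X interrupt handler per EQ, a MAP_EQ of the async event mask onto that last EQ, and a final pass that arms every doorbell with the notification bit. A compilable outline under those assumptions, with stub helpers in place of the driver calls.

struct init_eq { int eqn; int irq; };

/* Stubs for EQ creation, IRQ registration, MAP_EQ and doorbell arming;
 * they only illustrate the ordering. */
static int  init_create_eq(struct init_eq *eq, int nent) { (void)nent; eq->eqn = 0; return 0; }
static int  init_request_irq(struct init_eq *eq)         { (void)eq; return 0; }
static int  init_map_async(struct init_eq *eq)           { (void)eq; return 0; }
static void init_arm_doorbell(struct init_eq *eq)        { (void)eq; }

static int init_eq_table_sketch(struct init_eq *eqs, int num_comp_vectors)
{
    int i, err;

    /* one EQ per completion vector, plus one trailing EQ for async events */
    for (i = 0; i <= num_comp_vectors; i++) {
        err = init_create_eq(&eqs[i], 1024 /* example ring size */);
        if (err)
            return err;
    }

    /* one interrupt handler per EQ (the MSI-X path in the fragments above) */
    for (i = 0; i <= num_comp_vectors; i++) {
        err = init_request_irq(&eqs[i]);
        if (err)
            return err;
    }

    /* route asynchronous events to the last EQ, then arm every doorbell */
    err = init_map_async(&eqs[num_comp_vectors]);
    if (err)
        return err;
    for (i = 0; i <= num_comp_vectors; i++)
        init_arm_doorbell(&eqs[i]);

    return 0;
}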
1303 /* Map the new eq to handle all asynchronous events */
1305 priv->eq_table.eq[i].eqn);
1307 MLX4_DEBUG( "Failed mapping eq for interrupt test\n");
1319 priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
1336 err = request_irq(priv->eq_table.eq[vec].irq,
1339 priv->eq_table.eq + vec);
1351 eq_set_ci(&priv->eq_table.eq[vec], 1);
1383 free_irq(priv->eq_table.eq[vec].irq, &priv->eq_table.eq[vec]);