Lines matching refs:self

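All matches below come from one file; judging by the aq_vec_* names this is aq_vec.c from the Atlantic (Aquantia/Marvell AQtion) Ethernet driver, where `self` is the driver's conventional name for the object a function operates on. Here that object is struct aq_vec_s: the per-interrupt-vector context that owns one TX/RX ring pair per traffic class plus the NAPI instance that polls them. Short annotated sketches, reconstructed around the matched lines, follow each group; anything not visible in the matches themselves should be read as an assumption.
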
30           struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
38           if (!self) {
41                   for (i = 0U; self->tx_rings > i; ++i) {
42                           ring = self->ring[i];
46                           if (self->aq_hw_ops->hw_ring_tx_head_update) {
47                                   err = self->aq_hw_ops->hw_ring_tx_head_update(
48                                           self->aq_hw,
60                           err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
80                                   err = self->aq_hw_ops->hw_ring_rx_fill(
81                                           self->aq_hw,
94                           self->aq_hw_ops->hw_irq_enable(self->aq_hw,
95                                           1U << self->aq_ring_param.vec_idx);
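
Lines 30-95 are the NAPI poll callback for the vector. The shape is the standard poll loop: reap TX completions, pull in completed RX descriptors, refill the ring, and re-arm this vector's interrupt once work_done comes in under budget. A minimal sketch of that control flow, reconstructed around the matched lines; statistics and the original if/else nesting are trimmed, and aq_ring_rx_clean(), aq_ring_rx_fill() and the sw_tail field are assumptions based on the driver's ring layer:

    static int aq_vec_poll(struct napi_struct *napi, int budget)
    {
            struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
            unsigned int sw_tail_old = 0U;
            struct aq_ring_s *ring;
            int work_done = 0;
            unsigned int i;
            int err;

            if (!self)
                    return 0;

            for (i = 0U; self->tx_rings > i; ++i) {
                    ring = self->ring[i];

                    /* Reap TX completions if the HW reports a head pointer. */
                    if (self->aq_hw_ops->hw_ring_tx_head_update) {
                            err = self->aq_hw_ops->hw_ring_tx_head_update(
                                    self->aq_hw, &ring[AQ_VEC_TX_ID]);
                            if (err < 0)
                                    break;
                    }

                    /* Pull completed RX descriptors in from the hardware. */
                    err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
                                                    &ring[AQ_VEC_RX_ID]);
                    if (err < 0)
                            break;

                    /* Build skbs, charging them against the NAPI budget. */
                    err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], napi,
                                           &work_done, budget - work_done);
                    if (err < 0)
                            break;

                    /* Refill the software ring, then hand new buffers to HW. */
                    sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;
                    err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
                    if (err < 0)
                            break;

                    err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
                                                    &ring[AQ_VEC_RX_ID],
                                                    sw_tail_old);
                    if (err < 0)
                            break;
            }

            if (work_done < budget) {
                    napi_complete_done(napi, work_done);
                    self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                    1U << self->aq_ring_param.vec_idx);
            }

            return work_done;
    }

The closing hw_irq_enable() is one half of the interrupt dance: the legacy ISR below (lines 347-349) masks the same per-vector bit before scheduling NAPI, so the vector stays quiet until the rings have been drained.
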
105          struct aq_vec_s *self = NULL;
107          self = kzalloc(sizeof(*self), GFP_KERNEL);
108          if (!self)
111          self->aq_nic = aq_nic;
112          self->aq_ring_param.vec_idx = idx;
113          self->aq_ring_param.cpu =
116          cpumask_set_cpu(self->aq_ring_param.cpu,
117                          &self->aq_ring_param.affinity_mask);
119          self->tx_rings = 0;
120          self->rx_rings = 0;
122          netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
125          return self;
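
Lines 105-125 are the constructor. Almost everything is visible in the matches; below is a sketch with only the signature, the error label, and the CPU placement filled in, all three of which are assumptions (the real policy behind aq_ring_param.cpu is cut off at line 113):

    struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
                                  struct aq_nic_cfg_s *aq_nic_cfg)
    {
            struct aq_vec_s *self = NULL;

            self = kzalloc(sizeof(*self), GFP_KERNEL);
            if (!self)
                    goto err_exit;

            self->aq_nic = aq_nic;
            self->aq_ring_param.vec_idx = idx;
            self->aq_ring_param.cpu = idx;  /* assumed idx-based placement */

            /* Pin the vector's affinity hint to that CPU. */
            cpumask_set_cpu(self->aq_ring_param.cpu,
                            &self->aq_ring_param.affinity_mask);

            self->tx_rings = 0;
            self->rx_rings = 0;

            netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);

    err_exit:
            return self;
    }

The three-argument netif_napi_add() with no weight parameter dates this code to roughly kernel v6.1 or later, when the explicit weight argument was dropped in favor of the default.
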
128  int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
139                  ring = &self->ring[i][AQ_VEC_TX_ID];
144                  ++self->tx_rings;
148                  ring = &self->ring[i][AQ_VEC_RX_ID];
151                                       self->napi.napi_id) < 0) {
168                  ++self->rx_rings;
173                  aq_vec_ring_free(self);
174                  self = NULL;
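
Lines 128-174 allocate the per-traffic-class ring pairs. The fragment at line 151 looks like the tail of an xdp_rxq_info_reg() call, which ties the RX ring's XDP info to this vector's NAPI id. The idiom worth noting is the error path: tx_rings and rx_rings count only rings that allocated successfully, so the unwind at lines 173-174 can hand a partially built vector to aq_vec_ring_free(). A sketch, with the ring-allocation helpers elided and the loop bound plus the xdp_rxq field name assumed:

    int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
                          unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
    {
            struct aq_ring_s *ring;
            unsigned int i;
            int err = 0;

            for (i = 0; i < aq_nic_cfg->tcs; ++i) {   /* assumed bound */
                    ring = &self->ring[i][AQ_VEC_TX_ID];
                    /* ... TX ring allocation elided ... */
                    ++self->tx_rings;

                    ring = &self->ring[i][AQ_VEC_RX_ID];
                    if (xdp_rxq_info_reg(&ring->xdp_rxq, aq_nic->ndev, idx,
                                         self->napi.napi_id) < 0) {
                            err = -ENOMEM;
                            goto err_exit;
                    }
                    /* ... RX ring allocation elided ... */
                    ++self->rx_rings;
            }

    err_exit:
            if (err < 0) {
                    /* The counters tell ring_free how far we got. */
                    aq_vec_ring_free(self);
                    self = NULL;
            }

            return err;
    }
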
180  int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
187          self->aq_hw_ops = aq_hw_ops;
188          self->aq_hw = aq_hw;
190          for (i = 0U; self->tx_rings > i; ++i) {
191                  ring = self->ring[i];
196                  err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
198                                                         &self->aq_ring_param);
206                  err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
208                                                         &self->aq_ring_param);
216                  err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
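
Lines 180-216 bind the vector to a concrete hardware layer (aq_hw_ops is a vtable of hw_ring_* callbacks) and program each ring pair into the NIC. A sketch; the software-side ring initialization the real function presumably interleaves here is elided:

    int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
                    struct aq_hw_s *aq_hw)
    {
            struct aq_ring_s *ring;
            unsigned int i;
            int err = 0;

            self->aq_hw_ops = aq_hw_ops;
            self->aq_hw = aq_hw;

            for (i = 0U; self->tx_rings > i; ++i) {
                    ring = self->ring[i];

                    err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
                                                    &ring[AQ_VEC_TX_ID],
                                                    &self->aq_ring_param);
                    if (err < 0)
                            goto err_exit;

                    err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
                                                    &ring[AQ_VEC_RX_ID],
                                                    &self->aq_ring_param);
                    if (err < 0)
                            goto err_exit;

                    /* Hand the initial RX descriptors to the hardware. */
                    err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
                                                    &ring[AQ_VEC_RX_ID], 0U);
                    if (err < 0)
                            goto err_exit;
            }

    err_exit:
            return err;
    }
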
226  int aq_vec_start(struct aq_vec_s *self)
232          for (i = 0U; self->tx_rings > i; ++i) {
233                  ring = self->ring[i];
234                  err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
239                  err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
245          napi_enable(&self->napi);
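
Lines 226-245 show the startup ordering: every TX and RX ring is started at the hardware level first, and only then is the NAPI instance enabled, so the poller can never run against a ring that has not been started. A sketch:

    int aq_vec_start(struct aq_vec_s *self)
    {
            struct aq_ring_s *ring;
            unsigned int i;
            int err = 0;

            for (i = 0U; self->tx_rings > i; ++i) {
                    ring = self->ring[i];
                    err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
                                                    &ring[AQ_VEC_TX_ID]);
                    if (err < 0)
                            goto err_exit;

                    err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
                                                    &ring[AQ_VEC_RX_ID]);
                    if (err < 0)
                            goto err_exit;
            }

            napi_enable(&self->napi);

    err_exit:
            return err;
    }
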
251  void aq_vec_stop(struct aq_vec_s *self)
256          for (i = 0U; self->tx_rings > i; ++i) {
257                  ring = self->ring[i];
258                  self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
261                  self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
265          napi_disable(&self->napi);
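
Lines 251-265 are the mirror image, with one asymmetry worth noting: napi_disable() comes last, and it sleeps until any in-flight poll has finished, so by the time it returns nothing is touching the rings. A sketch:

    void aq_vec_stop(struct aq_vec_s *self)
    {
            struct aq_ring_s *ring;
            unsigned int i;

            for (i = 0U; self->tx_rings > i; ++i) {
                    ring = self->ring[i];
                    self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
                                                     &ring[AQ_VEC_TX_ID]);
                    self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
                                                     &ring[AQ_VEC_RX_ID]);
            }

            /* Waits for any in-flight poll to complete before returning. */
            napi_disable(&self->napi);
    }
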
268  void aq_vec_deinit(struct aq_vec_s *self)
273          if (!self)
276          for (i = 0U; self->tx_rings > i; ++i) {
277                  ring = self->ring[i];
285  void aq_vec_free(struct aq_vec_s *self)
287          if (!self)
290          netif_napi_del(&self->napi);
292          kfree(self);
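
Lines 268-292 split teardown in two. aq_vec_deinit() walks the ring pairs and releases their queued buffers (the exact aq_ring_* calls are not in the matches), while aq_vec_free() only unregisters the NAPI context and frees the structure itself. Both tolerate a NULL self, so error paths can call them unconditionally. The free half, roughly:

    void aq_vec_free(struct aq_vec_s *self)
    {
            if (!self)
                    return;

            netif_napi_del(&self->napi);
            kfree(self);
    }
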
297  void aq_vec_ring_free(struct aq_vec_s *self)
302          if (!self)
305          for (i = 0U; self->tx_rings > i; ++i) {
306                  ring = self->ring[i];
308                  if (i < self->rx_rings) {
314          self->tx_rings = 0;
315          self->rx_rings = 0;
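
Lines 297-315 are the unwind helper used by the ring_alloc error path above: TX rings are freed for every counted index, RX rings only where i < rx_rings (a TX ring can exist without its RX partner if allocation failed between the two), and both counters are zeroed afterwards so the function is safe to reach twice. A sketch, with aq_ring_free() assumed as the driver's per-ring destructor:

    void aq_vec_ring_free(struct aq_vec_s *self)
    {
            struct aq_ring_s *ring;
            unsigned int i;

            if (!self)
                    return;

            for (i = 0U; self->tx_rings > i; ++i) {
                    ring = self->ring[i];
                    aq_ring_free(&ring[AQ_VEC_TX_ID]);
                    if (i < self->rx_rings)
                            aq_ring_free(&ring[AQ_VEC_RX_ID]);
            }

            self->tx_rings = 0;
            self->rx_rings = 0;
    }
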
321          struct aq_vec_s *self = private;
324          if (!self) {
328          napi_schedule(&self->napi);
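
Lines 321-328 are the per-vector (MSI/MSI-X) interrupt handler, which does nothing but kick NAPI; the vector's interrupt stays effectively masked, presumably via hardware auto-masking, until the poll loop re-enables it at lines 94-95. A sketch:

    static irqreturn_t aq_vec_isr(int irq, void *private)
    {
            struct aq_vec_s *self = private;
            int err = 0;

            if (!self) {
                    err = -EINVAL;
                    goto err_exit;
            }
            napi_schedule(&self->napi);

    err_exit:
            return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
    }
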
336          struct aq_vec_s *self = private;
340          if (!self)
342          err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
347                  self->aq_hw_ops->hw_irq_disable(self->aq_hw,
348                                  1U << self->aq_ring_param.vec_idx);
349                  napi_schedule(&self->napi);
351                  self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
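
Lines 336-351 handle the shared legacy-INTx case: the handler first reads the interrupt status to learn whether this device raised the line at all. If this vector's bit is set, the bit is masked and NAPI is scheduled; otherwise the line is re-armed and the interrupt is declined, since a legacy line may be shared with another device. A sketch:

    static irqreturn_t aq_vec_isr_legacy(int irq, void *private)
    {
            struct aq_vec_s *self = private;
            u64 irq_mask = 0U;
            int err;

            if (!self)
                    return IRQ_NONE;

            err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
            if (err < 0)
                    return IRQ_NONE;

            if (irq_mask) {
                    /* Mask this vector until the poll loop re-enables it. */
                    self->aq_hw_ops->hw_irq_disable(self->aq_hw,
                                    1U << self->aq_ring_param.vec_idx);
                    napi_schedule(&self->napi);
                    return IRQ_HANDLED;
            }

            /* Not ours: re-arm and decline. */
            self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
            return IRQ_NONE;
    }
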
358  cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
360          return &self->aq_ring_param.affinity_mask;
363  bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
365          return tc < self->rx_rings && tc < self->tx_rings;
368  unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
372          if (!aq_vec_is_valid_tc(self, tc))
375          count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
376          count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);
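
Lines 358-376 are small accessors. aq_vec_is_valid_tc() bounds-checks a traffic class against both ring counts, and aq_vec_get_sw_stats() uses it as a guard before packing that TC's RX counters, then its TX counters, into the caller's flat u64 buffer (the layout ethtool-style stats collection expects), returning how many slots were consumed. Completed with an assumed guard return and tail:

    unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self,
                                     const unsigned int tc, u64 *data)
    {
            unsigned int count;

            if (!aq_vec_is_valid_tc(self, tc))
                    return 0;

            /* RX counters first, then TX, appended in the same buffer. */
            count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID],
                                            data);
            count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID],
                                             data + count);

            return count;
    }
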