Lines matching refs: cnt (each line below is prefixed with its line number in the source file; the fragments appear to come from the sfc driver's TC hardware-counter handling)

33 struct efx_tc_counter *cnt = ptr;
35 WARN_ON(!list_empty(&cnt->users));
39 * threads could still be obtaining new pointers to *cnt if they can
42 flush_work(&cnt->work);
43 EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));
44 kfree(cnt);
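
The matches at 33-44 are the tail of a counter destructor, most likely the rhashtable free callback run at teardown: the comment fragment at 39 explains that it cannot wait for an RCU grace period here, so it instead relies on no new lookups being possible and flushes the deferred work before freeing. A minimal sketch of that teardown ordering, reconstructed around the matched lines (the function name, signature and comments are assumptions, not present in the listing):

    static void efx_tc_counter_free(void *ptr, void *__unused)
    {
            struct efx_tc_counter *cnt = ptr;

            /* no flow rule should still be using this counter */
            WARN_ON(!list_empty(&cnt->users));
            /* make sure the deferred stats work is not still running
             * before the memory backing cnt->work is freed
             */
            flush_work(&cnt->work);
            /* after the flush, nothing can be holding cnt->lock */
            EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));
            kfree(cnt);
    }

The ordering is the point: flush_work() must precede kfree(), otherwise a still-running work item could touch freed memory.
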
89 struct efx_tc_counter *cnt = container_of(work, struct efx_tc_counter, work);
95 spin_lock_bh(&cnt->lock);
96 touched = READ_ONCE(cnt->touched);
98 list_for_each_entry(act, &cnt->users, count_user) {
127 spin_unlock_bh(&cnt->lock);
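
Lines 89-127 are the body of that deferred work item: it recovers the counter from its embedded work_struct with container_of(), then walks the users list under the BH-safe spinlock. A sketch of the pattern (the element type and the loop body are assumptions; only the matched lines come from the source):

    static void efx_tc_counter_work(struct work_struct *work)
    {
            struct efx_tc_counter *cnt = container_of(work, struct efx_tc_counter,
                                                      work);
            struct efx_tc_action_set *act;  /* assumed element type */
            unsigned long touched;

            spin_lock_bh(&cnt->lock);
            touched = READ_ONCE(cnt->touched);
            /* fan the update out to every user of this counter */
            list_for_each_entry(act, &cnt->users, count_user) {
                    /* per-user handling elided (not in the matched lines) */
            }
            spin_unlock_bh(&cnt->lock);
    }

The _bh lock flavour matters because the same lock is taken in the counter-update path (328-364 below), which presumably runs from the receive (softirq) side.
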
135 struct efx_tc_counter *cnt;
138 cnt = kzalloc(sizeof(*cnt), GFP_USER);
139 if (!cnt)
142 spin_lock_init(&cnt->lock);
143 INIT_WORK(&cnt->work, efx_tc_counter_work);
144 cnt->touched = jiffies;
145 cnt->type = type;
147 rc = efx_mae_allocate_counter(efx, cnt);
150 INIT_LIST_HEAD(&cnt->users);
151 rc = rhashtable_insert_fast(&efx->tc->counter_ht, &cnt->linkage,
155 return cnt;
162 rc2 = efx_mae_free_counter(efx, cnt);
166 cnt->fw_id, rc2);
168 kfree(cnt);
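
Lines 135-168 are the constructor and its error unwind: allocate and initialise the software state, obtain a hardware counter from the MAE, then publish the counter in the driver's rhashtable; if the insert fails, the hardware counter is handed back before the memory is freed. A sketch of that ordering (the labels, the ht-params symbol, the warning text and the exact signature are assumptions):

    static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
                                                                 int type)
    {
            struct efx_tc_counter *cnt;
            int rc, rc2;

            cnt = kzalloc(sizeof(*cnt), GFP_USER);
            if (!cnt)
                    return ERR_PTR(-ENOMEM);

            spin_lock_init(&cnt->lock);
            INIT_WORK(&cnt->work, efx_tc_counter_work);
            cnt->touched = jiffies;
            cnt->type = type;

            rc = efx_mae_allocate_counter(efx, cnt);  /* hardware resource */
            if (rc)
                    goto fail_hw;
            INIT_LIST_HEAD(&cnt->users);
            rc = rhashtable_insert_fast(&efx->tc->counter_ht, &cnt->linkage,
                                        efx_tc_counter_ht_params);
            if (rc)
                    goto fail_insert;
            return cnt;
    fail_insert:
            /* unwind in reverse order: hand the hardware counter back */
            rc2 = efx_mae_free_counter(efx, cnt);
            if (rc2)
                    netif_warn(efx, hw, efx->net_dev,
                               "Failed to free MAE counter %u, rc = %d\n",
                               cnt->fw_id, rc2);
    fail_hw:
            kfree(cnt);
            return ERR_PTR(rc);
    }

GFP_USER rather than GFP_KERNEL marks this as an allocation made on behalf of a userspace request (a TC rule insertion); it behaves like GFP_KERNEL but additionally honours cpuset memory-placement limits.
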
173 struct efx_tc_counter *cnt)
177 rhashtable_remove_fast(&efx->tc->counter_ht, &cnt->linkage,
179 rc = efx_mae_free_counter(efx, cnt);
183 cnt->fw_id, rc);
184 WARN_ON(!list_empty(&cnt->users));
192 flush_work(&cnt->work);
193 EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));
194 kfree(cnt);
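
Lines 173-194 are the normal release path, efx_tc_flower_release_counter(). It mirrors the destructor at 33-44 but first unpublishes the counter: the rhashtable removal comes before everything else so that no new lookup can hand out a pointer while the counter is being torn down. A sketch (the gap at 185-191 is not in the matched lines; the synchronize_rcu() shown there is an assumption, inferred from the rcu_read_lock() in the update path at 330):

    static void efx_tc_flower_release_counter(struct efx_nic *efx,
                                              struct efx_tc_counter *cnt)
    {
            int rc;

            /* unpublish first: no new references can be taken */
            rhashtable_remove_fast(&efx->tc->counter_ht, &cnt->linkage,
                                   efx_tc_counter_ht_params);
            rc = efx_mae_free_counter(efx, cnt);
            if (rc)
                    netif_warn(efx, hw, efx->net_dev,
                               "Failed to free MAE counter %u, rc = %d\n",
                               cnt->fw_id, rc);
            WARN_ON(!list_empty(&cnt->users));
            /* assumed: wait out RCU readers that found the counter before
             * the rhashtable removal, so they finish before the kfree()
             */
            synchronize_rcu();
            flush_work(&cnt->work);
            EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock));
            kfree(cnt);
    }
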
218 efx_tc_flower_release_counter(efx, ctr->cnt);
227 struct efx_tc_counter *cnt;
246 cnt = efx_tc_flower_allocate_counter(efx, type);
247 if (IS_ERR(cnt)) {
252 return (void *)cnt; /* it's an ERR_PTR */
254 ctr->cnt = cnt;
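
Lines 218-254 are the reference-taking wrappers around the constructor and the release path (the put side at 218, the get side at 227-254). The cast at 252 is the standard ERR_PTR idiom: an error is encoded in the top page of the pointer space, so it survives a cast to another pointer type and can be propagated unchanged. A caller then unpacks it like this (illustrative only; the caller, the index type and the function name are assumptions):

    struct efx_tc_counter_index *ctr;

    ctr = efx_tc_flower_get_counter_index(efx, cookie, type);
    if (IS_ERR(ctr))
            return PTR_ERR(ctr);  /* decode the ERR_PTR back into -errno */
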
328 struct efx_tc_counter *cnt;
330 rcu_read_lock(); /* Protect against deletion of 'cnt' */
331 cnt = efx_tc_flower_find_counter_by_fw_id(efx, counter_type, counter_idx);
332 if (!cnt) {
344 spin_lock_bh(&cnt->lock);
345 if ((s32)mark - (s32)cnt->gen < 0) {
357 cnt->gen = mark;
359 cnt->packets += packets;
360 cnt->bytes += bytes;
361 cnt->touched = jiffies;
363 spin_unlock_bh(&cnt->lock);
364 schedule_work(&cnt->work);
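
Lines 328-364 are the stats-update path. Two details stand out. First, the rcu_read_lock() at 330 is what the (assumed) grace period in the release path pairs with: a counter found in the hashtable stays allocated for the duration of the update. Second, the check at 345 is wraparound-safe sequence arithmetic: casting both generation values to s32 and testing the sign of the difference gives the correct "older than" answer even after the u32 generation wraps, provided the two values are within 2^31 of each other. A standalone demonstration (plain C, compilable outside the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* Same test as line 345: true when generation a is older than b,
     * even across u32 wraparound (valid while the values are within
     * 2^31 of each other).
     */
    static int gen_before(uint32_t a, uint32_t b)
    {
            return (int32_t)a - (int32_t)b < 0;
    }

    int main(void)
    {
            printf("%d\n", gen_before(5, 10));              /* 1: 5 is older */
            printf("%d\n", gen_before(10, 5));              /* 0 */
            printf("%d\n", gen_before(0xfffffff0u, 0x10u)); /* 1: pre-wrap gen is older */
            return 0;
    }

So an update whose mark is older than cnt->gen is evidently dropped as stale (its handling at 346-356 did not match), while a current one advances gen, accumulates packets and bytes, refreshes the touched timestamp, and kicks the work item at 364 to propagate the new totals.
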