Lines Matching defs:flow

140  * struct qrtr_tx_flow - tx flow control
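The struct behind these matches is small; its three members account for every field access below (resume_tx, pending, tx_failed). A minimal sketch reconstructed from those accesses, with the watermark constants referenced at lines 291 and 303; the values shown follow the upstream net/qrtr driver and should be verified against your tree:

/* One flow-control counter per destination <node, port> pair */
struct qrtr_tx_flow {
	struct wait_queue_head resume_tx;	/* waiters for a resume-tx from the remote */
	int pending;				/* confirm-pending messages in flight */
	int tx_failed;				/* a confirm_rx-flagged message was lost */
};

#define QRTR_TX_FLOW_HIGH	10	/* block transmission at this many pending */
#define QRTR_TX_FLOW_LOW	5	/* request a resume-tx at this many pending */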
172 struct qrtr_tx_flow *flow;
191 /* Free tx flow counters */
193 flow = *slot;
195 kfree(flow);
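Lines 172-195 come from node teardown, where the per-node radix tree of flows is walked and each counter freed. A sketch of that cleanup loop with the iterator variables it needs; the wrapper name qrtr_node_free_flows is hypothetical, upstream the loop sits inline in the node release path:

/* Hypothetical wrapper; upstream this loop is inline in node release */
static void qrtr_node_free_flows(struct qrtr_node *node)
{
	struct radix_tree_iter iter;
	struct qrtr_tx_flow *flow;
	void __rcu **slot;

	/* Free tx flow counters: unlink each entry before kfree() so the
	 * tree never holds a pointer to freed memory.
	 */
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
		kfree(flow);
	}
}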
217 * qrtr_tx_resume() - reset flow control counter
226 struct qrtr_tx_flow *flow;
232 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
234 if (flow) {
235 spin_lock(&flow->resume_tx.lock);
236 flow->pending = 0;
237 spin_unlock(&flow->resume_tx.lock);
238 wake_up_interruptible_all(&flow->resume_tx);
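Lines 217-238 are the receive side of the scheme: a QRTR_TYPE_RESUME_TX control packet names a remote node and port, and the matching flow's counter is cleared under the waitqueue lock before every blocked sender is woken. A sketch of the full function, assuming the upstream qrtr_ctrl_pkt layout and the node<<32|port key used by qrtr_tx_wait() below:

static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
{
	struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)skb->data;
	u64 remote_node = le32_to_cpu(pkt->client.node);
	u32 remote_port = le32_to_cpu(pkt->client.port);
	struct qrtr_tx_flow *flow;
	unsigned long key;

	/* Flows are keyed by the destination node and port */
	key = remote_node << 32 | remote_port;

	rcu_read_lock();
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	rcu_read_unlock();
	if (flow) {
		/* Reset the counter under the waitqueue lock, then let every
		 * sender parked in qrtr_tx_wait() re-evaluate its condition.
		 */
		spin_lock(&flow->resume_tx.lock);
		flow->pending = 0;
		spin_unlock(&flow->resume_tx.lock);
		wake_up_interruptible_all(&flow->resume_tx);
	}

	consume_skb(skb);
}

Reusing resume_tx.lock rather than a dedicated spinlock is what lets qrtr_tx_wait() use the *_locked_irq waitqueue helpers on the same lock.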
245 * qrtr_tx_wait() - flow control for outgoing packets
251 * The flow control scheme is based around the low and high "watermarks". When
263 struct qrtr_tx_flow *flow;
272 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
273 if (!flow) {
274 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
275 if (flow) {
276 init_waitqueue_head(&flow->resume_tx);
277 if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
278 kfree(flow);
279 flow = NULL;
285 /* Set confirm_rx if we were unable to find and allocate a flow */
286 if (!flow)
289 spin_lock_irq(&flow->resume_tx.lock);
290 ret = wait_event_interruptible_locked_irq(flow->resume_tx,
291 flow->pending < QRTR_TX_FLOW_HIGH ||
292 flow->tx_failed ||
298 } else if (flow->tx_failed) {
299 flow->tx_failed = 0;
302 flow->pending++;
303 confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
305 spin_unlock_irq(&flow->resume_tx.lock);
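Lines 245-305 are the heart of the scheme. Every data transmission passes through here first: below QRTR_TX_FLOW_LOW nothing special happens, at the low watermark the outgoing message carries confirm_rx to ask the remote for a QRTR_TYPE_RESUME_TX, and at QRTR_TX_FLOW_HIGH the sender sleeps until qrtr_tx_resume() clears the counter. A reconstruction from the fragments above; the QRTR_TYPE_DATA early return and the !node->ep exit condition are filled in from the upstream driver and are assumptions here:

static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
			int type)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;
	int confirm_rx = 0;
	int ret;

	/* Never set confirm_rx on non-data packets */
	if (type != QRTR_TYPE_DATA)
		return 0;

	/* Look up the flow for this destination, creating it on first use */
	mutex_lock(&node->qrtr_tx_lock);
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (flow) {
			init_waitqueue_head(&flow->resume_tx);
			if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
				kfree(flow);
				flow = NULL;
			}
		}
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Set confirm_rx if we were unable to find and allocate a flow */
	if (!flow)
		return 1;

	spin_lock_irq(&flow->resume_tx.lock);
	ret = wait_event_interruptible_locked_irq(flow->resume_tx,
						  flow->pending < QRTR_TX_FLOW_HIGH ||
						  flow->tx_failed ||
						  !node->ep);
	if (ret < 0) {
		confirm_rx = ret;		/* interrupted by a signal */
	} else if (!node->ep) {
		confirm_rx = -EPIPE;		/* endpoint went away */
	} else if (flow->tx_failed) {
		flow->tx_failed = 0;
		confirm_rx = 1;			/* re-request a resume-tx */
	} else {
		flow->pending++;
		confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
	}
	spin_unlock_irq(&flow->resume_tx.lock);

	return confirm_rx;
}

The return value doubles as the confirm_rx flag for the caller: 0 or 1 on success, or a negative errno when the wait was interrupted or the endpoint disappeared.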
317 * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
320 * Work around this by marking the flow as having a failed transmission and
327 struct qrtr_tx_flow *flow;
330 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
332 if (flow) {
333 spin_lock_irq(&flow->resume_tx.lock);
334 flow->tx_failed = 1;
335 spin_unlock_irq(&flow->resume_tx.lock);
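Lines 317-335 handle the case the comment describes: if a confirm_rx-flagged message is lost, no resume-tx will ever arrive, so pending would climb to QRTR_TX_FLOW_HIGH and stall transmission permanently. Marking the flow failed makes the next qrtr_tx_wait() clear the mark and resend confirm_rx. A sketch, again assuming the node<<32|port key and the RCU-protected lookup suggested by the gap at line 331:

static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
				int dest_port)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;

	rcu_read_lock();
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	rcu_read_unlock();
	if (flow) {
		/* The next qrtr_tx_wait() observes tx_failed, clears it and
		 * forces confirm_rx, re-requesting the lost resume-tx.
		 */
		spin_lock_irq(&flow->resume_tx.lock);
		flow->tx_failed = 1;
		spin_unlock_irq(&flow->resume_tx.lock);
	}
}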
627 struct qrtr_tx_flow *flow;
653 flow = *slot;
654 wake_up_interruptible_all(&flow->resume_tx);
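Lines 627-654 come from endpoint teardown: every flow on the departing node is woken so that senders sleeping in qrtr_tx_wait() can observe the dead endpoint and fail with -EPIPE instead of blocking forever. A sketch of that loop; that it runs under the same qrtr_tx_lock mutex as flow creation is an assumption here:

	/* Wake up any transmitters waiting for a resume-tx from the remote;
	 * with node->ep cleared, their wait condition is now true.
	 */
	mutex_lock(&node->qrtr_tx_lock);
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		wake_up_interruptible_all(&flow->resume_tx);
	}
	mutex_unlock(&node->qrtr_tx_lock);

Note that flows are only woken here, not freed; freeing happens later in the node release path (lines 191-195).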