Lines matching references to conn (source line number, then the matching line):

53  *   @conn   connection to update
62 struct smc_connection *conn = &smc->conn;
67 smc_curs_add(conn->rmb_desc->len, &cons, len);
70 if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) {
71 diff = smc_curs_comp(conn->rmb_desc->len, &cons,
72 &conn->urg_curs);
77 conn->urg_state = SMC_URG_READ;
83 smc_curs_add(conn->rmb_desc->len, &cons, 1);
84 conn->urg_rx_skip_pend = false;
87 conn->urg_state = SMC_URG_READ;
91 smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);
95 smc_tx_consumer_update(conn, force);
102 struct smc_connection *conn = &smc->conn;
105 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
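The fragments above revolve around circular cursor arithmetic: smc_curs_add() advances the consumer cursor modulo the RMB length, and smc_curs_comp()/smc_curs_diff() measure how far one cursor is ahead of another, which is how the update path decides whether the urgent byte has just been consumed or still has to be skipped. Below is a minimal userspace sketch of that arithmetic, assuming a simplified wrap/count cursor; it is illustrative only and not the kernel's union smc_host_cursor or its helpers.

    #include <assert.h>
    #include <stdio.h>

    /* Simplified model of an SMC host cursor: a wrap counter plus a byte
     * offset into a ring of 'size' bytes (the RMB).
     */
    struct cursor {
        unsigned short wrap;   /* times the ring has wrapped */
        unsigned int   count;  /* byte offset within the ring */
    };

    /* Advance a cursor by 'value' bytes, wrapping at the ring size
     * (the role smc_curs_add() plays for the consumer cursor).
     */
    static void curs_add(unsigned int size, struct cursor *curs, unsigned int value)
    {
        curs->count += value;
        if (curs->count >= size) {
            curs->wrap++;
            curs->count -= size;
        }
    }

    /* Bytes by which 'cur' is ahead of 'old' on a ring of 'size' bytes. */
    static unsigned int curs_diff(unsigned int size, const struct cursor *old,
                                  const struct cursor *cur)
    {
        if (old->wrap != cur->wrap)
            return size - old->count + cur->count;
        return cur->count - old->count;
    }

    int main(void)
    {
        struct cursor cons = { .wrap = 0, .count = 60 };
        struct cursor urg  = { .wrap = 1, .count = 2 };

        curs_add(64, &cons, 6);               /* 60 + 6 wraps to offset 2 */
        assert(cons.wrap == 1 && cons.count == 2);
        /* distance 0: the consumer cursor now sits exactly on the
         * recorded urgent position
         */
        printf("diff = %u\n", curs_diff(64, &urg, &cons));
        return 0;
    }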
119 struct smc_connection *conn;
126 conn = &smc->conn;
130 if (atomic_sub_and_test(priv->len, &conn->splice_pending))
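conn->splice_pending counts bytes handed to a pipe by the splice path but not yet released; the pipe buffer release callback subtracts its length, and when atomic_sub_and_test() reports that the counter reached zero a blocked reader can be woken. A userspace sketch of that accounting with C11 atomics follows; names are illustrative and the wake-up is reduced to a return value.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int splice_pending;   /* bytes spliced but not yet released */

    /* Producer side: account for bytes handed over to the pipe
     * (mirrors the atomic_add() done in the splice path).
     */
    static void splice_account(int bytes)
    {
        atomic_fetch_add(&splice_pending, bytes);
    }

    /* Release side: subtract the released length and report whether the
     * counter hit zero, i.e. no splice is pending any more and a waiter
     * may be woken (the kernel uses atomic_sub_and_test() for this test).
     */
    static bool splice_release(int bytes)
    {
        return atomic_fetch_sub(&splice_pending, bytes) - bytes == 0;
    }

    int main(void)
    {
        splice_account(4096);
        splice_account(1024);
        printf("done after first release:  %d\n", splice_release(4096));
        printf("done after second release: %d\n", splice_release(1024));
        return 0;
    }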
152 struct smc_link_group *lgr = smc->conn.lgr;
161 nr_pages = !lgr->is_smcd && smc->conn.rmb_desc->is_vm ?
180 (!lgr->is_smcd && !smc->conn.rmb_desc->is_vm)) {
184 partial[0].offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
187 pages[0] = smc->conn.rmb_desc->pages;
215 if (!lgr->is_smcd && smc->conn.rmb_desc->is_vm) {
219 get_page(smc->conn.rmb_desc->pages);
221 atomic_add(bytes, &smc->conn.splice_pending);
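In the splice path the page handling depends on how the RMB is backed: SMC-D and physically contiguous SMC-R buffers are described by a single pages[] entry whose offset is computed from rmb_desc->cpu_addr, while a virtually contiguous buffer (rmb_desc->is_vm) needs one entry per page the requested range spans, each page referenced individually with get_page(). A small sketch of that page-span arithmetic, with PAGE_SIZE fixed at 4 KiB purely for illustration:

    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* For a read that starts 'offset' bytes into its first page and covers
     * 'len' bytes of a virtually contiguous buffer, how many distinct pages
     * does it touch? A contiguous RMB always uses a single entry instead.
     */
    static unsigned long pages_spanned(unsigned long offset, unsigned long len)
    {
        return PAGE_ALIGN(len + offset) / PAGE_SIZE;
    }

    int main(void)
    {
        /* 6000 bytes starting 3000 bytes into a page: 3 pages touched */
        printf("%lu\n", pages_spanned(3000, 6000));
        /* a short read fully inside one page: 1 page */
        printf("%lu\n", pages_spanned(100, 200));
        return 0;
    }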
241 static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
243 return atomic_read(&conn->bytes_to_rcv) &&
244 !atomic_read(&conn->splice_pending);
256 int (*fcrit)(struct smc_connection *conn))
259 struct smc_connection *conn = &smc->conn;
261 &conn->local_tx_ctrl.conn_state_flags;
265 if (fcrit(conn))
273 conn->killed ||
274 fcrit(conn),
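smc_rx_wait() takes its readiness test as a function pointer, so one wait loop serves both recvmsg (any data available) and splice (data available and no splice still pending); the wait also ends on socket error, shutdown, peer abort or conn->killed. The userspace sketch below shows only the predicate plumbing, with the actual sleep on the socket wait queue elided; the struct and names are illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal stand-in for the connection state the predicates inspect. */
    struct conn {
        int  bytes_to_rcv;     /* bytes ready in the receive buffer */
        int  splice_pending;   /* bytes spliced but not yet released */
        bool killed;
    };

    /* Predicate used for recvmsg: any data at all. */
    static int data_available(struct conn *c)
    {
        return c->bytes_to_rcv != 0;
    }

    /* Predicate used for splice: data available and no splice in flight. */
    static int data_available_no_splice_pend(struct conn *c)
    {
        return c->bytes_to_rcv != 0 && c->splice_pending == 0;
    }

    /* Shape of the wait helper: return 1 if the predicate already holds;
     * the real code otherwise sleeps until the predicate, an error,
     * shutdown, peer abort or conn->killed ends the wait.
     */
    static int rx_wait(struct conn *c, int (*fcrit)(struct conn *c))
    {
        if (fcrit(c))
            return 1;
        return c->killed ? -1 : 0;   /* 0 = would block in the real code */
    }

    int main(void)
    {
        struct conn c = { .bytes_to_rcv = 512, .splice_pending = 100 };

        printf("recvmsg ready: %d\n", rx_wait(&c, data_available));
        printf("splice ready:  %d\n", rx_wait(&c, data_available_no_splice_pend));
        return 0;
    }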
284 struct smc_connection *conn = &smc->conn;
290 !(conn->urg_state == SMC_URG_VALID) ||
291 conn->urg_state == SMC_URG_READ)
295 if (conn->urg_state == SMC_URG_VALID) {
297 smc->conn.urg_state = SMC_URG_READ;
301 rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
303 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
304 if (smc_curs_diff(conn->rmb_desc->len, &cons,
305 &conn->urg_curs) > 1)
306 conn->urg_rx_skip_pend = true;
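Urgent data is a single out-of-band byte: reading it copies conn->urg_rx_byte, moves urg_state from SMC_URG_VALID to SMC_URG_READ, and, when the urgent byte is not the very next in-band byte, sets urg_rx_skip_pend so a later consumer-cursor update skips over it. A compact model of just that state handling follows; the cursor bookkeeping is simplified to plain ring offsets and the names are illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    enum urg_state { URG_READ, URG_VALID };   /* subset of the kernel's states */

    struct conn {
        enum urg_state urg_state;
        unsigned char  urg_rx_byte;       /* the out-of-band byte */
        bool           urg_rx_skip_pend;  /* skip it during in-band reads */
        unsigned int   cons_off;          /* consumer offset in the ring */
        unsigned int   urg_off;           /* offset recorded for the urgent byte */
        unsigned int   rmb_len;           /* ring size */
    };

    /* Ring distance from the consumer cursor to the recorded urgent position. */
    static unsigned int dist_to_urg(const struct conn *c)
    {
        return (c->urg_off + c->rmb_len - c->cons_off) % c->rmb_len;
    }

    /* Model of reading the out-of-band byte: only legal while the urgent
     * byte is valid and unread; mark it read and remember whether the
     * in-band read path still has to skip over it later.
     */
    static int recv_urg(struct conn *c, unsigned char *out)
    {
        if (c->urg_state != URG_VALID)
            return -1;                    /* nothing to read */
        *out = c->urg_rx_byte;
        c->urg_state = URG_READ;
        if (dist_to_urg(c) > 1)
            c->urg_rx_skip_pend = true;
        return 1;
    }

    int main(void)
    {
        struct conn c = {
            .urg_state = URG_VALID, .urg_rx_byte = 0xff,
            .cons_off = 10, .urg_off = 20, .rmb_len = 64,
        };
        unsigned char b = 0;
        int rc = recv_urg(&c, &b);

        printf("rc=%d byte=0x%02x skip_pend=%d\n", rc, b, c.urg_rx_skip_pend);
        return 0;
    }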
327 struct smc_connection *conn = &smc->conn;
329 if (smc_rx_data_available(conn))
331 else if (conn->urg_state == SMC_URG_VALID)
349 struct smc_connection *conn = &smc->conn;
350 int (*func)(struct smc_connection *conn);
371 readable = atomic_read(&conn->bytes_to_rcv);
372 if (readable >= conn->rmb_desc->len)
373 SMC_STAT_RMB_RX_FULL(smc, !conn->lnk);
376 SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk);
378 rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;
384 if (conn->killed)
428 if (!smc_rx_data_available(conn)) {
436 readable = atomic_read(&conn->bytes_to_rcv);
437 splbytes = atomic_read(&conn->splice_pending);
447 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
450 smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
451 if (conn->urg_state == SMC_URG_VALID &&
459 chunk_len = min_t(size_t, copylen, conn->rmb_desc->len -
463 smc_rmb_sync_sg_for_cpu(conn);
496 atomic_sub(copylen, &conn->bytes_to_rcv);
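The receive loop reads the RMB as a ring: each pass copies at most rmb_desc->len - chunk_off bytes (up to the end of the buffer), then wraps to offset 0 for the remainder, and only afterwards subtracts what was copied from bytes_to_rcv and advances the consumer cursor. A userspace sketch of that two-chunk copy over a plain ring buffer:

    #include <stdio.h>
    #include <string.h>

    /* Copy 'copylen' bytes out of a ring buffer of 'ring_len' bytes,
     * starting at offset 'off', in at most two chunks: the tail of the
     * ring first, then the wrapped remainder from the start. Assumes
     * copylen <= ring_len, as the receive path guarantees.
     */
    static void ring_copy(const char *ring, size_t ring_len, size_t off,
                          char *dst, size_t copylen)
    {
        size_t chunk_off = off;
        size_t done = 0;

        while (done < copylen) {
            size_t chunk_len = copylen - done;

            if (chunk_len > ring_len - chunk_off)
                chunk_len = ring_len - chunk_off;   /* stop at ring end */
            memcpy(dst + done, ring + chunk_off, chunk_len);
            done += chunk_len;
            chunk_off = 0;                          /* wrap to ring start */
        }
    }

    int main(void)
    {
        char ring[8] = { 'E', 'F', 'G', 'H', 'A', 'B', 'C', 'D' };
        char out[9] = { 0 };

        ring_copy(ring, sizeof(ring), 4, out, 8);   /* reads "ABCDEFGH" */
        printf("%s\n", out);
        return 0;
    }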
513 atomic_set(&smc->conn.splice_pending, 0);
514 smc->conn.urg_state = SMC_URG_READ;
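Note that smc_rx_init() starts the connection with urg_state = SMC_URG_READ, i.e. "no unread urgent data", so the checks above that only act on SMC_URG_VALID stay inactive until the peer actually signals urgent data, and with splice_pending at zero so the first splice starts from a clean count.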