From 6f9c7b5c83a832884b3e47a6ce52145991073586 Mon Sep 17 00:00:00 2001
From: Eli Cohen <eli@mellanox.co.il>
Date: Sun, 19 Sep 2010 11:28:40 +0200
Subject: [PATCH] libmlx4: fix possible inline size

The current driver checks the requested inline size only by making sure it
does not exceed 1024 bytes. This is wrong since the whole WQE is limited to
1008 bytes. Moreover, a more careful calculation is required to avoid cases
where the application requests inline support of a size that, when actually
used later, produces bad WQEs and stalls the connection. This patch takes
into account the maximum WQE size, the segments used to build a WQE for each
QP type, and the overhead incurred by the inline segment headers themselves.
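
As a rough illustration of the new limit (assuming the usual ConnectX
segment sizes -- a 16 byte control segment, a 16 byte remote address
segment and a 4 byte inline segment header -- and leaving aside exactly
how num_inline_segs() splits the data), the best case for an RC/UC QP is
approximately:

	size = 1008		/* MLX4_MAX_WQE_SIZE              */
	     - 16		/* struct mlx4_wqe_ctrl_seg       */
	     - 16		/* struct mlx4_wqe_raddr_seg      */
	     - nsegs * 4;	/* struct mlx4_wqe_inline_seg(s)  */

which stays well below 1024 even with a single inline segment, so the old
check accepted inline sizes that no WQE can actually hold.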

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
---
 src/verbs.c |   45 ++++++++++++++++++++++++++++++++++++++++-----
 1 files changed, 40 insertions(+), 5 deletions(-)

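(Note, not for the changelog: the application-visible effect, sketched with
plain libibverbs calls and placeholder pd/cq objects, is that a request such
as

	struct ibv_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.qp_type = IBV_QPT_RC,
		.cap = {
			.max_send_wr     = 16,
			.max_recv_wr     = 16,
			.max_send_sge    = 1,
			.max_recv_sge    = 1,
			.max_inline_data = 1024,
		},
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &attr);

now fails up front with qp == NULL instead of being accepted and later
generating the bad WQEs described above.)
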
Index: libmlx4/src/verbs.c
===================================================================
--- libmlx4.orig/src/verbs.c	2010-09-29 11:10:17.691587848 +0200
+++ libmlx4/src/verbs.c	2010-09-29 11:16:11.031586721 +0200
@@ -402,6 +402,44 @@ int mlx4_destroy_srq(struct ibv_srq *ibs
 	return 0;
 }
 
+static int verify_sizes(struct ibv_qp_init_attr *attr, struct mlx4_context *context)
+{
+	int size;
+	int nsegs;
+
+	if (attr->cap.max_send_wr     > context->max_qp_wr ||
+	    attr->cap.max_recv_wr     > context->max_qp_wr ||
+	    attr->cap.max_send_sge    > context->max_sge   ||
+	    attr->cap.max_recv_sge    > context->max_sge)
+		return -1;
+
+	if (attr->cap.max_inline_data) {
+		nsegs = num_inline_segs(attr->cap.max_inline_data, attr->qp_type);
+		size = MLX4_MAX_WQE_SIZE - nsegs * sizeof (struct mlx4_wqe_inline_seg);
+		switch (attr->qp_type) {
+		case IBV_QPT_UD:
+			size -= (sizeof (struct mlx4_wqe_ctrl_seg) +
+				 sizeof (struct mlx4_wqe_datagram_seg));
+			break;
+
+		case IBV_QPT_RC:
+		case IBV_QPT_UC:
+		case IBV_QPT_XRC:
+			size -= (sizeof (struct mlx4_wqe_ctrl_seg) +
+				 sizeof (struct mlx4_wqe_raddr_seg));
+			break;
+
+		default:
+			return 0;
+		}
+
+		if (attr->cap.max_inline_data > size)
+			return -1;
+	}
+
+	return 0;
+}
+
 struct ibv_qp *mlx4_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
 {
 	struct mlx4_create_qp     cmd;
@@ -412,11 +450,7 @@ struct ibv_qp *mlx4_create_qp(struct ibv
 
 
 	/* Sanity check QP size before proceeding */
-	if (attr->cap.max_send_wr     > context->max_qp_wr ||
-	    attr->cap.max_recv_wr     > context->max_qp_wr ||
-	    attr->cap.max_send_sge    > context->max_sge   ||
-	    attr->cap.max_recv_sge    > context->max_sge   ||
-	    attr->cap.max_inline_data > 1024)
+	if (verify_sizes(attr, context))
 		return NULL;
 
 	qp = malloc(sizeof *qp);
Index: libmlx4/src/mlx4.h
===================================================================
--- libmlx4.orig/src/mlx4.h	2010-09-29 11:10:17.691587848 +0200
+++ libmlx4/src/mlx4.h	2010-09-29 11:11:35.559586971 +0200
@@ -159,6 +159,10 @@ enum {
 	MLX4_CQE_OPCODE_RESIZE		= 0x16,
 };
 
+enum {
+	MLX4_MAX_WQE_SIZE = 1008
+};
+
 struct mlx4_device {
 	struct ibv_device		ibv_dev;
 	int				page_size;
@@ -410,6 +414,7 @@ int mlx4_post_recv(struct ibv_qp *ibqp, 
 			  struct ibv_recv_wr **bad_wr);
 void mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type,
 			   struct mlx4_qp *qp);
+int num_inline_segs(int data, enum ibv_qp_type type);
 int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
 		       enum ibv_qp_type type, struct mlx4_qp *qp);
 void mlx4_set_sq_sizes(struct mlx4_qp *qp, struct ibv_qp_cap *cap,
Index: libmlx4/src/qp.c
===================================================================
--- libmlx4.orig/src/qp.c	2010-09-29 11:10:17.691587848 +0200
+++ libmlx4/src/qp.c	2010-09-29 11:12:14.931587492 +0200
@@ -505,7 +505,7 @@ out:
 	return ret;
 }
 
-static int num_inline_segs(int data, enum ibv_qp_type type)
+int num_inline_segs(int data, enum ibv_qp_type type)
 {
 	/*
 	 * Inline data segments are not allowed to cross 64 byte
@@ -634,7 +634,8 @@ void mlx4_set_sq_sizes(struct mlx4_qp *q
 	int wqe_size;
 	struct mlx4_context *ctx = to_mctx(qp->ibv_qp.context);
 
-	wqe_size = (1 << qp->sq.wqe_shift) - sizeof (struct mlx4_wqe_ctrl_seg);
+	wqe_size = min((1 << qp->sq.wqe_shift), MLX4_MAX_WQE_SIZE) -
+		sizeof (struct mlx4_wqe_ctrl_seg);
 	switch (type) {
 	case IBV_QPT_UD:
 		wqe_size -= sizeof (struct mlx4_wqe_datagram_seg);
