1/* 2 * IBM eServer eHCA Infiniband device driver for Linux on POWER 3 * 4 * Firmware Infiniband Interface code for POWER 5 * 6 * Authors: Christoph Raisch <raisch@de.ibm.com> 7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com> 8 * Joachim Fenkes <fenkes@de.ibm.com> 9 * Gerd Bayer <gerd.bayer@de.ibm.com> 10 * Waleri Fomin <fomin@de.ibm.com> 11 * 12 * Copyright (c) 2005 IBM Corporation 13 * 14 * All rights reserved. 15 * 16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB 17 * BSD. 18 * 19 * OpenIB BSD License 20 * 21 * Redistribution and use in source and binary forms, with or without 22 * modification, are permitted provided that the following conditions are met: 23 * 24 * Redistributions of source code must retain the above copyright notice, this 25 * list of conditions and the following disclaimer. 26 * 27 * Redistributions in binary form must reproduce the above copyright notice, 28 * this list of conditions and the following disclaimer in the documentation 29 * and/or other materials 30 * provided with the distribution. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 42 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

/*
 * Bit masks for the "allocate controls" input register of H_ALLOC_RESOURCE
 * when creating a QP.  EHCA_BMASK_IBM uses IBM bit numbering (bit 0 = MSB),
 * matching the (1ULL << 63) / (1ULL << (63 - 7)) usage further below.
 */
#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

/* Requested maximum WR / SGE counts, packed into input register r10 */
#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

/* SRQ-related input fields (r11/r12) */
#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
/*
 * NOTE(review): (0, 64) exceeds the valid 0..63 bit range; this mask is not
 * referenced anywhere in this file -- confirm against EHCA_BMASK_IBM before
 * using it.
 */
#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)

/* Fields of the output registers returned by H_ALLOC_RESOURCE for a QP */
#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

/* Bit fields of the port-attributes word passed to H_MODIFY_PORT */
#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)

/* printf formats for dumping 4/7/9 hcall argument registers (r4..r12) */
#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"

/* Serializes all hcalls when the ehca_lock_hcalls module knob is set */
static DEFINE_SPINLOCK(hcall_lock);

/*
 * Map an H_LONG_BUSY_* hcall return code to the suggested retry delay in
 * milliseconds.  Unknown codes fall back to 1 ms.
 */
static u32 get_longbusy_msecs(int longbusy_rc)
{
	switch (longbusy_rc) {
	case H_LONG_BUSY_ORDER_1_MSEC:
		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:
		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:
		return 100;
	case H_LONG_BUSY_ORDER_1_SEC:
		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:
		return 10000;
	case H_LONG_BUSY_ORDER_100_SEC:
		return 100000;
	default:
		return 1;
	}
}

/*
 * Issue a 7-argument hypervisor call with no output registers.
 *
 * Retries up to 5 times while the hypervisor reports "long busy", sleeping
 * for the delay suggested by the return code between attempts.  Optionally
 * serialized via hcall_lock (ehca_lock_hcalls).  Returns the hcall return
 * code, or H_BUSY if the call was still busy after all retries.
 */
static long ehca_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
			     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

	for (i = 0; i < 5; i++) {
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		/* hypervisor asks us to back off and retry */
		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
				     opcode, ret, arg1, arg2, arg3,
				     arg4, arg5, arg6, arg7);
		else
			if (unlikely(ehca_debug_level >= 2))
				ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

		return ret;
	}

	return H_BUSY;
}

/*
 * Issue a 9-argument hypervisor call that returns up to 9 output registers
 * in outs[].  Same long-busy retry and optional locking behavior as
 * ehca_plpar_hcall_norets() above.
 */
static long ehca_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
			     arg1, arg2, arg3, arg4, arg5,
			     arg6, arg7, arg8, arg9);

	for (i = 0; i < 5; i++) {
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		/* hypervisor asks us to back off and retry */
		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS) {
			ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
				     opcode, arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9);
			ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		} else if (unlikely(ehca_debug_level >= 2))
			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		return ret;
	}

	return H_BUSY;
}

/*
 * Allocate an event queue (EQ) via H_ALLOC_RESOURCE (resource type 3).
 * neq_control == 1 requests a notification event queue; otherwise an EQ
 * with an associated ISN is requested.  On success, returns the EQ handle
 * plus the actual entry count, page count and interrupt source token
 * through the out parameters.
 */
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_pfeq *pfeq,
			     const u32 neq_control,
			     const u32 number_of_entries,
			     struct ipz_eq_handle *eq_handle,
			     u32 *act_nr_of_entries,
			     u32 *act_pages,
			     u32 *eq_ist)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 allocate_controls;

	/* resource type */
	allocate_controls = 3ULL;

	/* ISN is associated */
	if (neq_control != 1)
		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
	else /* notification event queue */
		allocate_controls = (1ULL << 63) | allocate_controls;

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,  /* r4 */
				allocate_controls,      /* r5 */
				number_of_entries,      /* r6 */
				0, 0, 0, 0, 0, 0);
	/* outs are decoded even on failure; callers check ret first */
	eq_handle->handle = outs[0];
	*act_nr_of_entries = (u32)outs[3];
	*act_pages = (u32)outs[4];
	*eq_ist = (u32)outs[5];

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resource - ret=%lli ", ret);

	return ret;
}

/* Reset (acknowledge) the events in event_mask on the given EQ. */
u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
		       struct ipz_eq_handle eq_handle,
		       const u64 event_mask)
{
	return ehca_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle.handle, /* r4 */
				       eq_handle.handle,      /* r5 */
				       event_mask,            /* r6 */
				       0, 0, 0, 0);
}

/*
 * Allocate a completion queue (CQ) via H_ALLOC_RESOURCE and map its
 * hardware access pages (galpas).  If mapping fails, the freshly allocated
 * CQ is freed again and H_NO_MEM is returned.
 */
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_cq *cq,
			     struct ehca_alloc_cq_parms *param)
{
	int rc;
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,   /* r4 */
				2,	/* r5: resource type, presumably CQ */
				param->eq_handle.handle, /* r6 */
				cq->token,               /* r7 */
				param->nr_cqe,           /* r8 */
				0, 0, 0, 0);
	cq->ipz_cq_handle.handle = outs[0];
	param->act_nr_of_entries = (u32)outs[3];
	param->act_pages = (u32)outs[4];

	if (ret == H_SUCCESS) {
		/* outs[5]/outs[6]: kernel/user galpa page addresses */
		rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[5]);

			/* undo the allocation so we don't leak the CQ */
			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,     /* r4 */
						cq->ipz_cq_handle.handle,  /* r5 */
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

/*
 * Allocate a queue pair (QP or SRQ) via H_ALLOC_RESOURCE.
 *
 * Packs the requested properties from parms into the hcall input registers
 * (see the H_ALL_RES_QP_* masks above), decodes the granted ("actual")
 * sizes from the output registers back into parms, and maps the hardware
 * access pages.  As in the CQ path, a galpa mapping failure frees the QP
 * again and converts the result to H_NO_MEM.
 */
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms, int is_user)
{
	int rc;
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	/* +1 on the WR counts: presumably reserves one slot -- TODO confirm */
	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);

	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

	/* r12 carries either the SRQ limit (for an SRQ) or the SRQ's QPN */
	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,	            /* r4  */
				allocate_controls,	            /* r5  */
				parms->send_cq_handle.handle,       /* r6  */
				parms->recv_cq_handle.handle,       /* r7  */
				parms->eq_handle.handle,            /* r8  */
				((u64)parms->token << 32) | parms->pd.value,
								    /* r9  */
				max_r10_reg, r11, r12);             /* r10..r12 */

	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS) {
		/*
		 * NOTE(review): outs[6] is passed for BOTH galpa arguments
		 * here, while the CQ path uses outs[5] and outs[6] --
		 * presumably the QP call returns a single page address;
		 * confirm against the firmware interface spec.
		 */
		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[6]);

			/* undo the allocation so we don't leak the QP */
			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,     /* r4 */
						parms->qp_handle.handle,   /* r5 */
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

/*
 * Query port attributes via H_QUERY_PORT.  The response block must be a
 * page-aligned buffer; its absolute address is handed to the hypervisor.
 */
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
		      const u8 port_id,
		      struct hipz_query_port *query_port_response_block)
{
	u64 ret;
	u64 r_cb = virt_to_abs(query_port_response_block);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response block not page aligned");
		return H_PARAMETER;
	}

	ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
				      adapter_handle.handle, /* r4 */
				      port_id,	             /* r5 */
				      r_cb,	             /* r6 */
				      0, 0, 0, 0);

	/* dump the first 64 bytes of the response when debugging */
	if (ehca_debug_level >= 2)
		ehca_dmp(query_port_response_block, 64, "response_block");

	return ret;
}

/*
 * Modify port attributes via H_MODIFY_PORT.  Translates the IB_PORT_*
 * bits in modify_mask into the hypervisor's port-attributes word
 * (H_MP_* fields) on top of the raw port capability mask.
 */
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
		       const u8 port_id, const u32 port_cap,
		       const u8 init_type, const int modify_mask)
{
	u64 port_attributes = port_cap;

	if (modify_mask & IB_PORT_SHUTDOWN)
		port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
	if (modify_mask & IB_PORT_INIT_TYPE)
		port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
	if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
		port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);

	return ehca_plpar_hcall_norets(H_MODIFY_PORT,
				       adapter_handle.handle, /* r4 */
				       port_id,               /* r5 */
				       port_attributes,       /* r6 */
				       0, 0, 0, 0);
}

/*
 * Query HCA attributes via H_QUERY_HCA into a page-aligned response block.
 */
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
		     struct hipz_query_hca *query_hca_rblock)
{
	u64 r_cb = virt_to_abs(query_hca_rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response_block=%p not page aligned",
			     query_hca_rblock);
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_QUERY_HCA,
				       adapter_handle.handle, /* r4 */
				       r_cb,                  /* r5 */
				       0, 0, 0, 0, 0);
}

/*
 * Register queue pages with a firmware resource via H_REGISTER_RPAGES.
 * r5 packs queue_type into the low byte and pagesize into the next byte.
 * Shared helper for the EQ/CQ/QP/MR wrappers below.
 */
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
			  const u8 pagesize,
			  const u8 queue_type,
			  const u64 resource_handle,
			  const u64 logical_address_of_page,
			  u64 count)
{
	return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
				       adapter_handle.handle,      /* r4 */
				       (u64)queue_type | ((u64)pagesize) << 8,
				       /* r5 */
				       resource_handle,	           /* r6 */
				       logical_address_of_page,    /* r7 */
				       count,	                   /* r8 */
				       0, 0);
}

/*
 * Register a single page with an EQ.  Exactly one page per call is
 * accepted here (count must be 1).
 */
u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_eq_handle eq_handle,
			     struct ehca_pfeq *pfeq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	if (count != 1) {
		/* note: the "Ppage" typo is in the original message text */
		ehca_gen_err("Ppage counter=%llx", count);
		return H_PARAMETER;
	}
	return hipz_h_register_rpage(adapter_handle,
				     pagesize,
				     queue_type,
				     eq_handle.handle,
				     logical_address_of_page, count);
}

/*
 * Query the state of an interrupt source via H_QUERY_INT_STATE.
 * H_BUSY is treated as a non-error outcome by the caller, hence no log.
 */
u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
			   u32 ist)
{
	u64 ret;
	ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
				      adapter_handle.handle, /* r4 */
				      ist,                   /* r5 */
				      0, 0, 0, 0, 0);

	if (ret != H_SUCCESS && ret != H_BUSY)
		ehca_gen_err("Could not query interrupt state.");

	return ret;
}

/*
 * Register a single page with a CQ (count must be 1).  The gal parameter
 * is unused here; presumably kept for interface symmetry -- TODO confirm.
 */
u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_cq_handle cq_handle,
			     struct ehca_pfcq *pfcq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa gal)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     cq_handle.handle, logical_address_of_page,
				     count);
}

/*
 * Register a page with a QP.  NOTE(review): this rejects count > 1 (so
 * count == 0 passes through), unlike the EQ/CQ variants which require
 * count == 1 -- confirm whether that asymmetry is intended.
 */
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_qp_handle qp_handle,
			     struct ehca_pfqp *pfqp,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa galpa)
{
	if (count > 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     qp_handle.handle, logical_address_of_page,
				     count);
}

/*
 * Disable a QP via H_DISABLE_AND_GETC and retrieve pointers to the next
 * unprocessed SQ/RQ WQEs (each out pointer is optional and may be NULL).
 */
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe2processed,
			       void **log_addr_next_rq_wqe2processed,
			       int dis_and_get_function_code)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,     /* r4 */
				dis_and_get_function_code, /* r5 */
				qp_handle.handle,	   /* r6 */
				0, 0, 0, 0, 0, 0);
	if (log_addr_next_sq_wqe2processed)
		*log_addr_next_sq_wqe2processed = (void *)outs[0];
	if (log_addr_next_rq_wqe2processed)
		*log_addr_next_rq_wqe2processed = (void *)outs[1];

	return ret;
}

/*
 * Modify QP attributes via H_MODIFY_QP.  mqpcb is passed to the
 * hypervisor by absolute address; update_mask selects which of its
 * fields apply.
 */
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle,      /* r5 */
				update_mask,	       /* r6 */
				virt_to_abs(mqpcb),    /* r7 */
				0, 0, 0, 0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Insufficient resources ret=%lli", ret);

	return ret;
}

/*
 * Query QP attributes via H_QUERY_QP; the hypervisor fills the control
 * block at the given absolute address.
 */
u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
		    const struct ipz_qp_handle qp_handle,
		    struct ehca_pfqp *pfqp,
		    struct hcp_modify_qp_control_block *qqpcb,
		    struct h_galpa gal)
{
	return ehca_plpar_hcall_norets(H_QUERY_QP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       virt_to_abs(qqpcb),    /* r6 */
				       0, 0, 0, 0);
}

/*
 * Destroy a QP: unmap its hardware access pages, disable it (function
 * code 1 of H_DISABLE_AND_GETC), then free the firmware resource.
 * Returns the result of the final H_FREE_RESOURCE call.
 */
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_qp *qp)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = hcp_galpas_dtor(&qp->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct qp->galpas");
		return H_RESOURCE;
	}
	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,     /* r4 */
				/* function code */
				1,	                   /* r5 */
				qp->ipz_qp_handle.handle,  /* r6 */
				0, 0, 0, 0, 0, 0);
	if (ret == H_HARDWARE)
		ehca_gen_err("HCA not operational. ret=%lli", ret);

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,     /* r4 */
				      qp->ipz_qp_handle.handle,  /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource still in use. ret=%lli", ret);

	return ret;
}

/* Associate the special QP0 alias of the given port with this QP. */
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port)
{
	return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       port,                  /* r6 */
				       0, 0, 0, 0);
}

/*
 * Associate the special QP1 alias of the given port with this QP and
 * return the QP numbers of the PMA and BMA QPs created alongside it.
 */
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 * pma_qp_nr,
		       u32 * bma_qp_nr)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle,      /* r5 */
				port,	               /* r6 */
				0, 0, 0, 0, 0, 0);
	*pma_qp_nr = (u32)outs[0];
	*bma_qp_nr = (u32)outs[1];

	if (ret == H_ALIAS_EXIST)
		ehca_gen_err("AQP1 already exists. ret=%lli", ret);

	return ret;
}

/*
 * Attach the QP to a multicast group identified by DLID and GID
 * (subnet prefix + interface id).  Note the register order: the
 * interface id goes in r7 and the subnet prefix in r8.
 */
u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	u64 ret;

	ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
				      adapter_handle.handle, /* r4 */
				      qp_handle.handle,      /* r5 */
				      mcg_dlid,              /* r6 */
				      interface_id,          /* r7 */
				      subnet_prefix,         /* r8 */
				      0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

/* Detach the QP from a multicast group (inverse of hipz_h_attach_mcqp). */
u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	return ehca_plpar_hcall_norets(H_DETACH_MCQP,
				       adapter_handle.handle, /* r4 */
				       qp_handle.handle,      /* r5 */
				       mcg_dlid,              /* r6 */
				       interface_id,          /* r7 */
				       subnet_prefix,         /* r8 */
				       0, 0);
}

/*
 * Destroy a CQ: unmap its hardware access pages, then free the firmware
 * resource, optionally forcing the free via force_flag.
 * (note: the "cp->galpas" typo is in the original message text)
 */
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag)
{
	u64 ret;

	ret = hcp_galpas_dtor(&cq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct cp->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,     /* r4 */
				      cq->ipz_cq_handle.handle,  /* r5 */
				      force_flag != 0 ? 1L : 0L, /* r6 */
				      0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);

	return ret;
}

/* Destroy an EQ: unmap its access pages, then free the firmware resource. */
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_eq *eq)
{
	u64 ret;

	ret = hcp_galpas_dtor(&eq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct eq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,     /* r4 */
				      eq->ipz_eq_handle.handle,  /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource in use. ret=%lli ", ret);

	return ret;
}

/*
 * Allocate a memory region (MR) via H_ALLOC_RESOURCE (r5 = 5, presumably
 * the MR resource type).  Returns the MR handle and local/remote keys.
 */
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,            /* r4 */
				5,                                /* r5 */
				vaddr,                            /* r6 */
				length,                           /* r7 */
				(((u64)access_ctrl) << 32ULL),    /* r8 */
				pd.value,                         /* r9 */
				0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

/*
 * Register pages with a memory region.  Multi-page registrations pass a
 * page-aligned array of page addresses (dumped entry by entry at debug
 * level >= 3); a single-page registration passes the page address itself.
 */
u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	u64 ret;

	if (unlikely(ehca_debug_level >= 3)) {
		if (count > 1) {
			u64 *kpage;
			int i;
			kpage = (u64 *)abs_to_virt(logical_address_of_page);
			for (i = 0; i < count; i++)
				ehca_gen_dbg("kpage[%d]=%p",
					     i, (void *)kpage[i]);
		} else
			ehca_gen_dbg("kpage=%p",
				     (void *)logical_address_of_page);
	}

	/* the page-list address must itself be page aligned */
	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
		ehca_gen_err("logical_address_of_page not on a 4k boundary "
			     "adapter_handle=%llx mr=%p mr_handle=%llx "
			     "pagesize=%x queue_type=%x "
			     "logical_address_of_page=%llx count=%llx",
			     adapter_handle.handle, mr,
			     mr->ipz_mr_handle.handle, pagesize, queue_type,
			     logical_address_of_page, count);
		ret = H_PARAMETER;
	} else
		ret = hipz_h_register_rpage(adapter_handle, pagesize,
					    queue_type,
					    mr->ipz_mr_handle.handle,
					    logical_address_of_page, count);
	return ret;
}

/*
 * Query a memory region via H_QUERY_MR: length, virtual address, access
 * control and the l/r keys are unpacked from the output registers.
 */
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
				adapter_handle.handle,     /* r4 */
				mr->ipz_mr_handle.handle,  /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->len = outs[0];
	outparms->vaddr = outs[1];
	outparms->acl  = outs[4] >> 32;
	outparms->lkey = (u32)(outs[5] >> 32);
	outparms->rkey = (u32)(outs[5] & (0xffffffff));

	return ret;
}

/* Free a memory region's firmware resource. */
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mr *mr)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,     /* r4 */
				       mr->ipz_mr_handle.handle,  /* r5 */
				       0, 0, 0, 0, 0);
}

/*
 * Re-register a physical memory region via H_REREGISTER_PMR with new
 * address/length/access/PD; r8 packs access_ctrl (high word) and the PD.
 */
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
				adapter_handle.handle,    /* r4 */
				mr->ipz_mr_handle.handle, /* r5 */
				vaddr_in,	          /* r6 */
				length,                   /* r7 */
				/* r8 */
				((((u64)access_ctrl) << 32ULL) | pd.value),
				mr_addr_cb,               /* r9 */
				0, 0, 0);
	outparms->vaddr = outs[1];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

/*
 * Register a shared memory region (SMR) derived from orig_mr via
 * H_REGISTER_SMR, with its own vaddr, access rights and PD.
 */
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
				adapter_handle.handle,            /* r4 */
				orig_mr->ipz_mr_handle.handle,    /* r5 */
				vaddr_in,                         /* r6 */
				(((u64)access_ctrl) << 32ULL),    /* r7 */
				pd.value,                         /* r8 */
				0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

/*
 * Allocate a memory window (MW) via H_ALLOC_RESOURCE (r5 = 6, presumably
 * the MW resource type).  Returns the MW handle and remote key.
 */
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,  /* r4 */
				6,                      /* r5 */
				pd.value,               /* r6 */
				0, 0, 0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->rkey = (u32)outs[3];

	return ret;
}

/* Query a memory window's remote key via H_QUERY_MW. */
u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mw *mw,
		    struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
				adapter_handle.handle,     /* r4 */
				mw->ipz_mw_handle.handle,  /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->rkey = (u32)outs[3];

	return ret;
}

/* Free a memory window's firmware resource. */
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mw *mw)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,     /* r4 */
				       mw->ipz_mw_handle.handle,  /* r5 */
				       0, 0, 0, 0, 0);
}

/*
 * Retrieve error data for a resource via H_ERROR_DATA into a page-aligned
 * buffer.  NOTE(review): byte_count is currently unused in this body --
 * verify whether callers expect it to be filled in.
 */
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 ressource_handle,
		      void *rblock,
		      unsigned long *byte_count)
{
	u64 r_cb = virt_to_abs(rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("rblock not page aligned.");
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle.handle,
				       ressource_handle,
				       r_cb,
				       0, 0, 0, 0);
}

/*
 * Signal end-of-interrupt for the given irq via H_EOI.  The XIRR value
 * is built from the irq with 0xff in bits 24..31 (presumably the CPPR
 * priority field -- see the XICS interrupt controller spec); iosync()
 * orders prior MMIO before the hcall.
 */
u64 hipz_h_eoi(int irq)
{
	unsigned long xirr;

	iosync();
	xirr = (0xffULL << 24) | irq;

	return plpar_hcall_norets(H_EOI, xirr);
}