1/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. 2 * 3 * Copyright (C) 2010 David S. Miller <davem@davemloft.net> 4 */ 5 6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8#include <linux/kernel.h> 9#include <linux/module.h> 10#include <linux/of.h> 11#include <linux/of_device.h> 12#include <linux/cpumask.h> 13#include <linux/slab.h> 14#include <linux/interrupt.h> 15#include <linux/crypto.h> 16#include <crypto/md5.h> 17#include <crypto/sha.h> 18#include <crypto/aes.h> 19#include <crypto/des.h> 20#include <linux/mutex.h> 21#include <linux/delay.h> 22#include <linux/sched.h> 23 24#include <crypto/internal/hash.h> 25#include <crypto/scatterwalk.h> 26#include <crypto/algapi.h> 27 28#include <asm/hypervisor.h> 29#include <asm/mdesc.h> 30 31#include "n2_core.h" 32 33#define DRV_MODULE_NAME "n2_crypto" 34#define DRV_MODULE_VERSION "0.1" 35#define DRV_MODULE_RELDATE "April 29, 2010" 36 37static char version[] __devinitdata = 38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 39 40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 41MODULE_DESCRIPTION("Niagara2 Crypto driver"); 42MODULE_LICENSE("GPL"); 43MODULE_VERSION(DRV_MODULE_VERSION); 44 45#define N2_CRA_PRIORITY 300 46 47static DEFINE_MUTEX(spu_lock); 48 49struct spu_queue { 50 cpumask_t sharing; 51 unsigned long qhandle; 52 53 spinlock_t lock; 54 u8 q_type; 55 void *q; 56 unsigned long head; 57 unsigned long tail; 58 struct list_head jobs; 59 60 unsigned long devino; 61 62 char irq_name[32]; 63 unsigned int irq; 64 65 struct list_head list; 66}; 67 68static struct spu_queue **cpu_to_cwq; 69static struct spu_queue **cpu_to_mau; 70 71static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) 72{ 73 if (q->q_type == HV_NCS_QTYPE_MAU) { 74 off += MAU_ENTRY_SIZE; 75 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) 76 off = 0; 77 } else { 78 off += CWQ_ENTRY_SIZE; 79 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) 80 off = 0; 81 } 82 return off; 83} 84 85struct n2_request_common { 86 struct list_head entry; 87 unsigned int offset; 88}; 89#define OFFSET_NOT_RUNNING (~(unsigned int)0) 90 91/* An async job request records the final tail value it used in 92 * n2_request_common->offset, test to see if that offset is in 93 * the range old_head, new_head, inclusive. 94 */ 95static inline bool job_finished(struct spu_queue *q, unsigned int offset, 96 unsigned long old_head, unsigned long new_head) 97{ 98 if (old_head <= new_head) { 99 if (offset > old_head && offset <= new_head) 100 return true; 101 } else { 102 if (offset > old_head || offset <= new_head) 103 return true; 104 } 105 return false; 106} 107 108/* When the HEAD marker is unequal to the actual HEAD, we get 109 * a virtual device INO interrupt. We should process the 110 * completed CWQ entries and adjust the HEAD marker to clear 111 * the IRQ. 112 */ 113static irqreturn_t cwq_intr(int irq, void *dev_id) 114{ 115 unsigned long off, new_head, hv_ret; 116 struct spu_queue *q = dev_id; 117 118 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", 119 smp_processor_id(), q->qhandle); 120 121 spin_lock(&q->lock); 122 123 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); 124 125 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", 126 smp_processor_id(), new_head, hv_ret); 127 128 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { 129 } 130 131 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); 132 if (hv_ret == HV_EOK) 133 q->head = new_head; 134 135 spin_unlock(&q->lock); 136 137 return IRQ_HANDLED; 138} 139 140static irqreturn_t mau_intr(int irq, void *dev_id) 141{ 142 struct spu_queue *q = dev_id; 143 unsigned long head, hv_ret; 144 145 spin_lock(&q->lock); 146 147 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", 148 smp_processor_id(), q->qhandle); 149 150 hv_ret = sun4v_ncs_gethead(q->qhandle, &head); 151 152 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", 153 smp_processor_id(), head, hv_ret); 154 155 sun4v_ncs_sethead_marker(q->qhandle, head); 156 157 spin_unlock(&q->lock); 158 159 return IRQ_HANDLED; 160} 161 162static void *spu_queue_next(struct spu_queue *q, void *cur) 163{ 164 return q->q + spu_next_offset(q, cur - q->q); 165} 166 167static int spu_queue_num_free(struct spu_queue *q) 168{ 169 unsigned long head = q->head; 170 unsigned long tail = q->tail; 171 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); 172 unsigned long diff; 173 174 if (head > tail) 175 diff = head - tail; 176 else 177 diff = (end - tail) + head; 178 179 return (diff / CWQ_ENTRY_SIZE) - 1; 180} 181 182static void *spu_queue_alloc(struct spu_queue *q, int num_entries) 183{ 184 int avail = spu_queue_num_free(q); 185 186 if (avail >= num_entries) 187 return q->q + q->tail; 188 189 return NULL; 190} 191 192static unsigned long spu_queue_submit(struct spu_queue *q, void *last) 193{ 194 unsigned long hv_ret, new_tail; 195 196 new_tail = spu_next_offset(q, last - q->q); 197 198 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); 199 if (hv_ret == HV_EOK) 200 q->tail = new_tail; 201 return hv_ret; 202} 203 204static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, 205 int enc_type, int auth_type, 206 unsigned int hash_len, 207 bool sfas, bool sob, bool eob, bool encrypt, 208 int opcode) 209{ 210 u64 word = (len - 1) & CONTROL_LEN; 211 212 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); 213 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); 214 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); 215 if (sfas) 216 word |= CONTROL_STORE_FINAL_AUTH_STATE; 217 if (sob) 218 word |= CONTROL_START_OF_BLOCK; 219 if (eob) 220 word |= CONTROL_END_OF_BLOCK; 221 if (encrypt) 222 word |= CONTROL_ENCRYPT; 223 if (hmac_key_len) 224 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; 225 if (hash_len) 226 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; 227 228 return word; 229} 230 231 232struct n2_ahash_alg { 233 struct list_head entry; 234 const char *hash_zero; 235 const u32 *hash_init; 236 u8 hw_op_hashsz; 237 u8 digest_size; 238 u8 auth_type; 239 u8 hmac_type; 240 struct ahash_alg alg; 241}; 242 243static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm) 244{ 245 struct crypto_alg *alg = tfm->__crt_alg; 246 struct ahash_alg *ahash_alg; 247 248 ahash_alg = container_of(alg, struct ahash_alg, halg.base); 249 250 return container_of(ahash_alg, struct n2_ahash_alg, alg); 251} 252 253struct n2_hmac_alg { 254 const char *child_alg; 255 struct n2_ahash_alg derived; 256}; 257 258static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm) 259{ 260 struct crypto_alg *alg = tfm->__crt_alg; 261 struct ahash_alg *ahash_alg; 262 263 ahash_alg = container_of(alg, struct ahash_alg, halg.base); 264 265 return container_of(ahash_alg, struct n2_hmac_alg, derived.alg); 266} 267 268struct n2_hash_ctx { 269 struct crypto_ahash *fallback_tfm; 270}; 271 272#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */ 273 274struct n2_hmac_ctx { 275 struct n2_hash_ctx base; 276 277 struct crypto_shash *child_shash; 278 279 int hash_key_len; 280 unsigned char hash_key[N2_HASH_KEY_MAX]; 281}; 282 283struct n2_hash_req_ctx { 284 union { 285 struct md5_state md5; 286 struct sha1_state sha1; 287 struct sha256_state sha256; 288 } u; 289 290 struct ahash_request fallback_req; 291}; 292 293static int n2_hash_async_init(struct ahash_request *req) 294{ 295 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 296 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 297 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 298 299 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 300 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 301 302 return crypto_ahash_init(&rctx->fallback_req); 303} 304 305static int n2_hash_async_update(struct ahash_request *req) 306{ 307 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 308 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 309 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 310 311 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 312 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 313 rctx->fallback_req.nbytes = req->nbytes; 314 rctx->fallback_req.src = req->src; 315 316 return crypto_ahash_update(&rctx->fallback_req); 317} 318 319static int n2_hash_async_final(struct ahash_request *req) 320{ 321 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 322 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 323 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 324 325 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 326 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 327 rctx->fallback_req.result = req->result; 328 329 return crypto_ahash_final(&rctx->fallback_req); 330} 331 332static int n2_hash_async_finup(struct ahash_request *req) 333{ 334 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 335 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 336 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 337 338 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 339 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 340 rctx->fallback_req.nbytes = req->nbytes; 341 rctx->fallback_req.src = req->src; 342 rctx->fallback_req.result = req->result; 343 344 return crypto_ahash_finup(&rctx->fallback_req); 345} 346 347static int n2_hash_cra_init(struct crypto_tfm *tfm) 348{ 349 const char *fallback_driver_name = tfm->__crt_alg->cra_name; 350 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 351 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 352 struct crypto_ahash *fallback_tfm; 353 int err; 354 355 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 356 CRYPTO_ALG_NEED_FALLBACK); 357 if (IS_ERR(fallback_tfm)) { 358 pr_warning("Fallback driver '%s' could not be loaded!\n", 359 fallback_driver_name); 360 err = PTR_ERR(fallback_tfm); 361 goto out; 362 } 363 364 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 365 crypto_ahash_reqsize(fallback_tfm))); 366 367 ctx->fallback_tfm = fallback_tfm; 368 return 0; 369 370out: 371 return err; 372} 373 374static void n2_hash_cra_exit(struct crypto_tfm *tfm) 375{ 376 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 377 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 378 379 crypto_free_ahash(ctx->fallback_tfm); 380} 381 382static int n2_hmac_cra_init(struct crypto_tfm *tfm) 383{ 384 const char *fallback_driver_name = tfm->__crt_alg->cra_name; 385 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 386 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 387 struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm); 388 struct crypto_ahash *fallback_tfm; 389 struct crypto_shash *child_shash; 390 int err; 391 392 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 393 CRYPTO_ALG_NEED_FALLBACK); 394 if (IS_ERR(fallback_tfm)) { 395 pr_warning("Fallback driver '%s' could not be loaded!\n", 396 fallback_driver_name); 397 err = PTR_ERR(fallback_tfm); 398 goto out; 399 } 400 401 child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); 402 if (IS_ERR(child_shash)) { 403 pr_warning("Child shash '%s' could not be loaded!\n", 404 n2alg->child_alg); 405 err = PTR_ERR(child_shash); 406 goto out_free_fallback; 407 } 408 409 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 410 crypto_ahash_reqsize(fallback_tfm))); 411 412 ctx->child_shash = child_shash; 413 ctx->base.fallback_tfm = fallback_tfm; 414 return 0; 415 416out_free_fallback: 417 crypto_free_ahash(fallback_tfm); 418 419out: 420 return err; 421} 422 423static void n2_hmac_cra_exit(struct crypto_tfm *tfm) 424{ 425 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 426 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 427 428 crypto_free_ahash(ctx->base.fallback_tfm); 429 crypto_free_shash(ctx->child_shash); 430} 431 432static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, 433 unsigned int keylen) 434{ 435 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 436 struct crypto_shash *child_shash = ctx->child_shash; 437 struct crypto_ahash *fallback_tfm; 438 struct { 439 struct shash_desc shash; 440 char ctx[crypto_shash_descsize(child_shash)]; 441 } desc; 442 int err, bs, ds; 443 444 fallback_tfm = ctx->base.fallback_tfm; 445 err = crypto_ahash_setkey(fallback_tfm, key, keylen); 446 if (err) 447 return err; 448 449 desc.shash.tfm = child_shash; 450 desc.shash.flags = crypto_ahash_get_flags(tfm) & 451 CRYPTO_TFM_REQ_MAY_SLEEP; 452 453 bs = crypto_shash_blocksize(child_shash); 454 ds = crypto_shash_digestsize(child_shash); 455 BUG_ON(ds > N2_HASH_KEY_MAX); 456 if (keylen > bs) { 457 err = crypto_shash_digest(&desc.shash, key, keylen, 458 ctx->hash_key); 459 if (err) 460 return err; 461 keylen = ds; 462 } else if (keylen <= N2_HASH_KEY_MAX) 463 memcpy(ctx->hash_key, key, keylen); 464 465 ctx->hash_key_len = keylen; 466 467 return err; 468} 469 470static unsigned long wait_for_tail(struct spu_queue *qp) 471{ 472 unsigned long head, hv_ret; 473 474 do { 475 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); 476 if (hv_ret != HV_EOK) { 477 pr_err("Hypervisor error on gethead\n"); 478 break; 479 } 480 if (head == qp->tail) { 481 qp->head = head; 482 break; 483 } 484 } while (1); 485 return hv_ret; 486} 487 488static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, 489 struct cwq_initial_entry *ent) 490{ 491 unsigned long hv_ret = spu_queue_submit(qp, ent); 492 493 if (hv_ret == HV_EOK) 494 hv_ret = wait_for_tail(qp); 495 496 return hv_ret; 497} 498 499static int n2_do_async_digest(struct ahash_request *req, 500 unsigned int auth_type, unsigned int digest_size, 501 unsigned int result_size, void *hash_loc, 502 unsigned long auth_key, unsigned int auth_key_len) 503{ 504 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 505 struct cwq_initial_entry *ent; 506 struct crypto_hash_walk walk; 507 struct spu_queue *qp; 508 unsigned long flags; 509 int err = -ENODEV; 510 int nbytes, cpu; 511 512 /* The total effective length of the operation may not 513 * exceed 2^16. 514 */ 515 if (unlikely(req->nbytes > (1 << 16))) { 516 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 517 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 518 519 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 520 rctx->fallback_req.base.flags = 521 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 522 rctx->fallback_req.nbytes = req->nbytes; 523 rctx->fallback_req.src = req->src; 524 rctx->fallback_req.result = req->result; 525 526 return crypto_ahash_digest(&rctx->fallback_req); 527 } 528 529 nbytes = crypto_hash_walk_first(req, &walk); 530 531 cpu = get_cpu(); 532 qp = cpu_to_cwq[cpu]; 533 if (!qp) 534 goto out; 535 536 spin_lock_irqsave(&qp->lock, flags); 537 538 ent = qp->q + qp->tail; 539 540 ent->control = control_word_base(nbytes, auth_key_len, 0, 541 auth_type, digest_size, 542 false, true, false, false, 543 OPCODE_INPLACE_BIT | 544 OPCODE_AUTH_MAC); 545 ent->src_addr = __pa(walk.data); 546 ent->auth_key_addr = auth_key; 547 ent->auth_iv_addr = __pa(hash_loc); 548 ent->final_auth_state_addr = 0UL; 549 ent->enc_key_addr = 0UL; 550 ent->enc_iv_addr = 0UL; 551 ent->dest_addr = __pa(hash_loc); 552 553 nbytes = crypto_hash_walk_done(&walk, 0); 554 while (nbytes > 0) { 555 ent = spu_queue_next(qp, ent); 556 557 ent->control = (nbytes - 1); 558 ent->src_addr = __pa(walk.data); 559 ent->auth_key_addr = 0UL; 560 ent->auth_iv_addr = 0UL; 561 ent->final_auth_state_addr = 0UL; 562 ent->enc_key_addr = 0UL; 563 ent->enc_iv_addr = 0UL; 564 ent->dest_addr = 0UL; 565 566 nbytes = crypto_hash_walk_done(&walk, 0); 567 } 568 ent->control |= CONTROL_END_OF_BLOCK; 569 570 if (submit_and_wait_for_tail(qp, ent) != HV_EOK) 571 err = -EINVAL; 572 else 573 err = 0; 574 575 spin_unlock_irqrestore(&qp->lock, flags); 576 577 if (!err) 578 memcpy(req->result, hash_loc, result_size); 579out: 580 put_cpu(); 581 582 return err; 583} 584 585static int n2_hash_async_digest(struct ahash_request *req) 586{ 587 struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); 588 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 589 int ds; 590 591 ds = n2alg->digest_size; 592 if (unlikely(req->nbytes == 0)) { 593 memcpy(req->result, n2alg->hash_zero, ds); 594 return 0; 595 } 596 memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); 597 598 return n2_do_async_digest(req, n2alg->auth_type, 599 n2alg->hw_op_hashsz, ds, 600 &rctx->u, 0UL, 0); 601} 602 603static int n2_hmac_async_digest(struct ahash_request *req) 604{ 605 struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); 606 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 607 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 608 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 609 int ds; 610 611 ds = n2alg->derived.digest_size; 612 if (unlikely(req->nbytes == 0) || 613 unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { 614 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 615 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 616 617 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 618 rctx->fallback_req.base.flags = 619 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 620 rctx->fallback_req.nbytes = req->nbytes; 621 rctx->fallback_req.src = req->src; 622 rctx->fallback_req.result = req->result; 623 624 return crypto_ahash_digest(&rctx->fallback_req); 625 } 626 memcpy(&rctx->u, n2alg->derived.hash_init, 627 n2alg->derived.hw_op_hashsz); 628 629 return n2_do_async_digest(req, n2alg->derived.hmac_type, 630 n2alg->derived.hw_op_hashsz, ds, 631 &rctx->u, 632 __pa(&ctx->hash_key), 633 ctx->hash_key_len); 634} 635 636struct n2_cipher_context { 637 int key_len; 638 int enc_type; 639 union { 640 u8 aes[AES_MAX_KEY_SIZE]; 641 u8 des[DES_KEY_SIZE]; 642 u8 des3[3 * DES_KEY_SIZE]; 643 u8 arc4[258]; /* S-box, X, Y */ 644 } key; 645}; 646 647#define N2_CHUNK_ARR_LEN 16 648 649struct n2_crypto_chunk { 650 struct list_head entry; 651 unsigned long iv_paddr : 44; 652 unsigned long arr_len : 20; 653 unsigned long dest_paddr; 654 unsigned long dest_final; 655 struct { 656 unsigned long src_paddr : 44; 657 unsigned long src_len : 20; 658 } arr[N2_CHUNK_ARR_LEN]; 659}; 660 661struct n2_request_context { 662 struct ablkcipher_walk walk; 663 struct list_head chunk_list; 664 struct n2_crypto_chunk chunk; 665 u8 temp_iv[16]; 666}; 667 668/* The SPU allows some level of flexibility for partial cipher blocks 669 * being specified in a descriptor. 670 * 671 * It merely requires that every descriptor's length field is at least 672 * as large as the cipher block size. This means that a cipher block 673 * can span at most 2 descriptors. However, this does not allow a 674 * partial block to span into the final descriptor as that would 675 * violate the rule (since every descriptor's length must be at lest 676 * the block size). So, for example, assuming an 8 byte block size: 677 * 678 * 0xe --> 0xa --> 0x8 679 * 680 * is a valid length sequence, whereas: 681 * 682 * 0xe --> 0xb --> 0x7 683 * 684 * is not a valid sequence. 685 */ 686 687struct n2_cipher_alg { 688 struct list_head entry; 689 u8 enc_type; 690 struct crypto_alg alg; 691}; 692 693static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) 694{ 695 struct crypto_alg *alg = tfm->__crt_alg; 696 697 return container_of(alg, struct n2_cipher_alg, alg); 698} 699 700struct n2_cipher_request_context { 701 struct ablkcipher_walk walk; 702}; 703 704static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 705 unsigned int keylen) 706{ 707 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 708 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 709 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 710 711 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); 712 713 switch (keylen) { 714 case AES_KEYSIZE_128: 715 ctx->enc_type |= ENC_TYPE_ALG_AES128; 716 break; 717 case AES_KEYSIZE_192: 718 ctx->enc_type |= ENC_TYPE_ALG_AES192; 719 break; 720 case AES_KEYSIZE_256: 721 ctx->enc_type |= ENC_TYPE_ALG_AES256; 722 break; 723 default: 724 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 725 return -EINVAL; 726 } 727 728 ctx->key_len = keylen; 729 memcpy(ctx->key.aes, key, keylen); 730 return 0; 731} 732 733static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 734 unsigned int keylen) 735{ 736 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 737 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 738 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 739 u32 tmp[DES_EXPKEY_WORDS]; 740 int err; 741 742 ctx->enc_type = n2alg->enc_type; 743 744 if (keylen != DES_KEY_SIZE) { 745 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 746 return -EINVAL; 747 } 748 749 err = des_ekey(tmp, key); 750 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { 751 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 752 return -EINVAL; 753 } 754 755 ctx->key_len = keylen; 756 memcpy(ctx->key.des, key, keylen); 757 return 0; 758} 759 760static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 761 unsigned int keylen) 762{ 763 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 764 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 765 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 766 767 ctx->enc_type = n2alg->enc_type; 768 769 if (keylen != (3 * DES_KEY_SIZE)) { 770 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 771 return -EINVAL; 772 } 773 ctx->key_len = keylen; 774 memcpy(ctx->key.des3, key, keylen); 775 return 0; 776} 777 778static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 779 unsigned int keylen) 780{ 781 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 782 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 783 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 784 u8 *s = ctx->key.arc4; 785 u8 *x = s + 256; 786 u8 *y = x + 1; 787 int i, j, k; 788 789 ctx->enc_type = n2alg->enc_type; 790 791 j = k = 0; 792 *x = 0; 793 *y = 0; 794 for (i = 0; i < 256; i++) 795 s[i] = i; 796 for (i = 0; i < 256; i++) { 797 u8 a = s[i]; 798 j = (j + key[k] + a) & 0xff; 799 s[i] = s[j]; 800 s[j] = a; 801 if (++k >= keylen) 802 k = 0; 803 } 804 805 return 0; 806} 807 808static inline int cipher_descriptor_len(int nbytes, unsigned int block_size) 809{ 810 int this_len = nbytes; 811 812 this_len -= (nbytes & (block_size - 1)); 813 return this_len > (1 << 16) ? (1 << 16) : this_len; 814} 815 816static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, 817 struct spu_queue *qp, bool encrypt) 818{ 819 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 820 struct cwq_initial_entry *ent; 821 bool in_place; 822 int i; 823 824 ent = spu_queue_alloc(qp, cp->arr_len); 825 if (!ent) { 826 pr_info("queue_alloc() of %d fails\n", 827 cp->arr_len); 828 return -EBUSY; 829 } 830 831 in_place = (cp->dest_paddr == cp->arr[0].src_paddr); 832 833 ent->control = control_word_base(cp->arr[0].src_len, 834 0, ctx->enc_type, 0, 0, 835 false, true, false, encrypt, 836 OPCODE_ENCRYPT | 837 (in_place ? OPCODE_INPLACE_BIT : 0)); 838 ent->src_addr = cp->arr[0].src_paddr; 839 ent->auth_key_addr = 0UL; 840 ent->auth_iv_addr = 0UL; 841 ent->final_auth_state_addr = 0UL; 842 ent->enc_key_addr = __pa(&ctx->key); 843 ent->enc_iv_addr = cp->iv_paddr; 844 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr); 845 846 for (i = 1; i < cp->arr_len; i++) { 847 ent = spu_queue_next(qp, ent); 848 849 ent->control = cp->arr[i].src_len - 1; 850 ent->src_addr = cp->arr[i].src_paddr; 851 ent->auth_key_addr = 0UL; 852 ent->auth_iv_addr = 0UL; 853 ent->final_auth_state_addr = 0UL; 854 ent->enc_key_addr = 0UL; 855 ent->enc_iv_addr = 0UL; 856 ent->dest_addr = 0UL; 857 } 858 ent->control |= CONTROL_END_OF_BLOCK; 859 860 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; 861} 862 863static int n2_compute_chunks(struct ablkcipher_request *req) 864{ 865 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 866 struct ablkcipher_walk *walk = &rctx->walk; 867 struct n2_crypto_chunk *chunk; 868 unsigned long dest_prev; 869 unsigned int tot_len; 870 bool prev_in_place; 871 int err, nbytes; 872 873 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); 874 err = ablkcipher_walk_phys(req, walk); 875 if (err) 876 return err; 877 878 INIT_LIST_HEAD(&rctx->chunk_list); 879 880 chunk = &rctx->chunk; 881 INIT_LIST_HEAD(&chunk->entry); 882 883 chunk->iv_paddr = 0UL; 884 chunk->arr_len = 0; 885 chunk->dest_paddr = 0UL; 886 887 prev_in_place = false; 888 dest_prev = ~0UL; 889 tot_len = 0; 890 891 while ((nbytes = walk->nbytes) != 0) { 892 unsigned long dest_paddr, src_paddr; 893 bool in_place; 894 int this_len; 895 896 src_paddr = (page_to_phys(walk->src.page) + 897 walk->src.offset); 898 dest_paddr = (page_to_phys(walk->dst.page) + 899 walk->dst.offset); 900 in_place = (src_paddr == dest_paddr); 901 this_len = cipher_descriptor_len(nbytes, walk->blocksize); 902 903 if (chunk->arr_len != 0) { 904 if (in_place != prev_in_place || 905 (!prev_in_place && 906 dest_paddr != dest_prev) || 907 chunk->arr_len == N2_CHUNK_ARR_LEN || 908 tot_len + this_len > (1 << 16)) { 909 chunk->dest_final = dest_prev; 910 list_add_tail(&chunk->entry, 911 &rctx->chunk_list); 912 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC); 913 if (!chunk) { 914 err = -ENOMEM; 915 break; 916 } 917 INIT_LIST_HEAD(&chunk->entry); 918 } 919 } 920 if (chunk->arr_len == 0) { 921 chunk->dest_paddr = dest_paddr; 922 tot_len = 0; 923 } 924 chunk->arr[chunk->arr_len].src_paddr = src_paddr; 925 chunk->arr[chunk->arr_len].src_len = this_len; 926 chunk->arr_len++; 927 928 dest_prev = dest_paddr + this_len; 929 prev_in_place = in_place; 930 tot_len += this_len; 931 932 err = ablkcipher_walk_done(req, walk, nbytes - this_len); 933 if (err) 934 break; 935 } 936 if (!err && chunk->arr_len != 0) { 937 chunk->dest_final = dest_prev; 938 list_add_tail(&chunk->entry, &rctx->chunk_list); 939 } 940 941 return err; 942} 943 944static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) 945{ 946 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 947 struct n2_crypto_chunk *c, *tmp; 948 949 if (final_iv) 950 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); 951 952 ablkcipher_walk_complete(&rctx->walk); 953 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { 954 list_del(&c->entry); 955 if (unlikely(c != &rctx->chunk)) 956 kfree(c); 957 } 958 959} 960 961static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) 962{ 963 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 964 struct crypto_tfm *tfm = req->base.tfm; 965 int err = n2_compute_chunks(req); 966 struct n2_crypto_chunk *c, *tmp; 967 unsigned long flags, hv_ret; 968 struct spu_queue *qp; 969 970 if (err) 971 return err; 972 973 qp = cpu_to_cwq[get_cpu()]; 974 err = -ENODEV; 975 if (!qp) 976 goto out; 977 978 spin_lock_irqsave(&qp->lock, flags); 979 980 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { 981 err = __n2_crypt_chunk(tfm, c, qp, encrypt); 982 if (err) 983 break; 984 list_del(&c->entry); 985 if (unlikely(c != &rctx->chunk)) 986 kfree(c); 987 } 988 if (!err) { 989 hv_ret = wait_for_tail(qp); 990 if (hv_ret != HV_EOK) 991 err = -EINVAL; 992 } 993 994 spin_unlock_irqrestore(&qp->lock, flags); 995 996 put_cpu(); 997 998out: 999 n2_chunk_complete(req, NULL); 1000 return err; 1001} 1002 1003static int n2_encrypt_ecb(struct ablkcipher_request *req) 1004{ 1005 return n2_do_ecb(req, true); 1006} 1007 1008static int n2_decrypt_ecb(struct ablkcipher_request *req) 1009{ 1010 return n2_do_ecb(req, false); 1011} 1012 1013static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) 1014{ 1015 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 1016 struct crypto_tfm *tfm = req->base.tfm; 1017 unsigned long flags, hv_ret, iv_paddr; 1018 int err = n2_compute_chunks(req); 1019 struct n2_crypto_chunk *c, *tmp; 1020 struct spu_queue *qp; 1021 void *final_iv_addr; 1022 1023 final_iv_addr = NULL; 1024 1025 if (err) 1026 return err; 1027 1028 qp = cpu_to_cwq[get_cpu()]; 1029 err = -ENODEV; 1030 if (!qp) 1031 goto out; 1032 1033 spin_lock_irqsave(&qp->lock, flags); 1034 1035 if (encrypt) { 1036 iv_paddr = __pa(rctx->walk.iv); 1037 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, 1038 entry) { 1039 c->iv_paddr = iv_paddr; 1040 err = __n2_crypt_chunk(tfm, c, qp, true); 1041 if (err) 1042 break; 1043 iv_paddr = c->dest_final - rctx->walk.blocksize; 1044 list_del(&c->entry); 1045 if (unlikely(c != &rctx->chunk)) 1046 kfree(c); 1047 } 1048 final_iv_addr = __va(iv_paddr); 1049 } else { 1050 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, 1051 entry) { 1052 if (c == &rctx->chunk) { 1053 iv_paddr = __pa(rctx->walk.iv); 1054 } else { 1055 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + 1056 tmp->arr[tmp->arr_len-1].src_len - 1057 rctx->walk.blocksize); 1058 } 1059 if (!final_iv_addr) { 1060 unsigned long pa; 1061 1062 pa = (c->arr[c->arr_len-1].src_paddr + 1063 c->arr[c->arr_len-1].src_len - 1064 rctx->walk.blocksize); 1065 final_iv_addr = rctx->temp_iv; 1066 memcpy(rctx->temp_iv, __va(pa), 1067 rctx->walk.blocksize); 1068 } 1069 c->iv_paddr = iv_paddr; 1070 err = __n2_crypt_chunk(tfm, c, qp, false); 1071 if (err) 1072 break; 1073 list_del(&c->entry); 1074 if (unlikely(c != &rctx->chunk)) 1075 kfree(c); 1076 } 1077 } 1078 if (!err) { 1079 hv_ret = wait_for_tail(qp); 1080 if (hv_ret != HV_EOK) 1081 err = -EINVAL; 1082 } 1083 1084 spin_unlock_irqrestore(&qp->lock, flags); 1085 1086 put_cpu(); 1087 1088out: 1089 n2_chunk_complete(req, err ? NULL : final_iv_addr); 1090 return err; 1091} 1092 1093static int n2_encrypt_chaining(struct ablkcipher_request *req) 1094{ 1095 return n2_do_chaining(req, true); 1096} 1097 1098static int n2_decrypt_chaining(struct ablkcipher_request *req) 1099{ 1100 return n2_do_chaining(req, false); 1101} 1102 1103struct n2_cipher_tmpl { 1104 const char *name; 1105 const char *drv_name; 1106 u8 block_size; 1107 u8 enc_type; 1108 struct ablkcipher_alg ablkcipher; 1109}; 1110 1111static const struct n2_cipher_tmpl cipher_tmpls[] = { 1112 /* ARC4: only ECB is supported (chaining bits ignored) */ 1113 { .name = "ecb(arc4)", 1114 .drv_name = "ecb-arc4", 1115 .block_size = 1, 1116 .enc_type = (ENC_TYPE_ALG_RC4_STREAM | 1117 ENC_TYPE_CHAINING_ECB), 1118 .ablkcipher = { 1119 .min_keysize = 1, 1120 .max_keysize = 256, 1121 .setkey = n2_arc4_setkey, 1122 .encrypt = n2_encrypt_ecb, 1123 .decrypt = n2_decrypt_ecb, 1124 }, 1125 }, 1126 1127 /* DES: ECB CBC and CFB are supported */ 1128 { .name = "ecb(des)", 1129 .drv_name = "ecb-des", 1130 .block_size = DES_BLOCK_SIZE, 1131 .enc_type = (ENC_TYPE_ALG_DES | 1132 ENC_TYPE_CHAINING_ECB), 1133 .ablkcipher = { 1134 .min_keysize = DES_KEY_SIZE, 1135 .max_keysize = DES_KEY_SIZE, 1136 .setkey = n2_des_setkey, 1137 .encrypt = n2_encrypt_ecb, 1138 .decrypt = n2_decrypt_ecb, 1139 }, 1140 }, 1141 { .name = "cbc(des)", 1142 .drv_name = "cbc-des", 1143 .block_size = DES_BLOCK_SIZE, 1144 .enc_type = (ENC_TYPE_ALG_DES | 1145 ENC_TYPE_CHAINING_CBC), 1146 .ablkcipher = { 1147 .ivsize = DES_BLOCK_SIZE, 1148 .min_keysize = DES_KEY_SIZE, 1149 .max_keysize = DES_KEY_SIZE, 1150 .setkey = n2_des_setkey, 1151 .encrypt = n2_encrypt_chaining, 1152 .decrypt = n2_decrypt_chaining, 1153 }, 1154 }, 1155 { .name = "cfb(des)", 1156 .drv_name = "cfb-des", 1157 .block_size = DES_BLOCK_SIZE, 1158 .enc_type = (ENC_TYPE_ALG_DES | 1159 ENC_TYPE_CHAINING_CFB), 1160 .ablkcipher = { 1161 .min_keysize = DES_KEY_SIZE, 1162 .max_keysize = DES_KEY_SIZE, 1163 .setkey = n2_des_setkey, 1164 .encrypt = n2_encrypt_chaining, 1165 .decrypt = n2_decrypt_chaining, 1166 }, 1167 }, 1168 1169 /* 3DES: ECB CBC and CFB are supported */ 1170 { .name = "ecb(des3_ede)", 1171 .drv_name = "ecb-3des", 1172 .block_size = DES_BLOCK_SIZE, 1173 .enc_type = (ENC_TYPE_ALG_3DES | 1174 ENC_TYPE_CHAINING_ECB), 1175 .ablkcipher = { 1176 .min_keysize = 3 * DES_KEY_SIZE, 1177 .max_keysize = 3 * DES_KEY_SIZE, 1178 .setkey = n2_3des_setkey, 1179 .encrypt = n2_encrypt_ecb, 1180 .decrypt = n2_decrypt_ecb, 1181 }, 1182 }, 1183 { .name = "cbc(des3_ede)", 1184 .drv_name = "cbc-3des", 1185 .block_size = DES_BLOCK_SIZE, 1186 .enc_type = (ENC_TYPE_ALG_3DES | 1187 ENC_TYPE_CHAINING_CBC), 1188 .ablkcipher = { 1189 .ivsize = DES_BLOCK_SIZE, 1190 .min_keysize = 3 * DES_KEY_SIZE, 1191 .max_keysize = 3 * DES_KEY_SIZE, 1192 .setkey = n2_3des_setkey, 1193 .encrypt = n2_encrypt_chaining, 1194 .decrypt = n2_decrypt_chaining, 1195 }, 1196 }, 1197 { .name = "cfb(des3_ede)", 1198 .drv_name = "cfb-3des", 1199 .block_size = DES_BLOCK_SIZE, 1200 .enc_type = (ENC_TYPE_ALG_3DES | 1201 ENC_TYPE_CHAINING_CFB), 1202 .ablkcipher = { 1203 .min_keysize = 3 * DES_KEY_SIZE, 1204 .max_keysize = 3 * DES_KEY_SIZE, 1205 .setkey = n2_3des_setkey, 1206 .encrypt = n2_encrypt_chaining, 1207 .decrypt = n2_decrypt_chaining, 1208 }, 1209 }, 1210 /* AES: ECB CBC and CTR are supported */ 1211 { .name = "ecb(aes)", 1212 .drv_name = "ecb-aes", 1213 .block_size = AES_BLOCK_SIZE, 1214 .enc_type = (ENC_TYPE_ALG_AES128 | 1215 ENC_TYPE_CHAINING_ECB), 1216 .ablkcipher = { 1217 .min_keysize = AES_MIN_KEY_SIZE, 1218 .max_keysize = AES_MAX_KEY_SIZE, 1219 .setkey = n2_aes_setkey, 1220 .encrypt = n2_encrypt_ecb, 1221 .decrypt = n2_decrypt_ecb, 1222 }, 1223 }, 1224 { .name = "cbc(aes)", 1225 .drv_name = "cbc-aes", 1226 .block_size = AES_BLOCK_SIZE, 1227 .enc_type = (ENC_TYPE_ALG_AES128 | 1228 ENC_TYPE_CHAINING_CBC), 1229 .ablkcipher = { 1230 .ivsize = AES_BLOCK_SIZE, 1231 .min_keysize = AES_MIN_KEY_SIZE, 1232 .max_keysize = AES_MAX_KEY_SIZE, 1233 .setkey = n2_aes_setkey, 1234 .encrypt = n2_encrypt_chaining, 1235 .decrypt = n2_decrypt_chaining, 1236 }, 1237 }, 1238 { .name = "ctr(aes)", 1239 .drv_name = "ctr-aes", 1240 .block_size = AES_BLOCK_SIZE, 1241 .enc_type = (ENC_TYPE_ALG_AES128 | 1242 ENC_TYPE_CHAINING_COUNTER), 1243 .ablkcipher = { 1244 .ivsize = AES_BLOCK_SIZE, 1245 .min_keysize = AES_MIN_KEY_SIZE, 1246 .max_keysize = AES_MAX_KEY_SIZE, 1247 .setkey = n2_aes_setkey, 1248 .encrypt = n2_encrypt_chaining, 1249 .decrypt = n2_encrypt_chaining, 1250 }, 1251 }, 1252 1253}; 1254#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) 1255 1256static LIST_HEAD(cipher_algs); 1257 1258struct n2_hash_tmpl { 1259 const char *name; 1260 const char *hash_zero; 1261 const u32 *hash_init; 1262 u8 hw_op_hashsz; 1263 u8 digest_size; 1264 u8 block_size; 1265 u8 auth_type; 1266 u8 hmac_type; 1267}; 1268 1269static const char md5_zero[MD5_DIGEST_SIZE] = { 1270 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 1271 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, 1272}; 1273static const u32 md5_init[MD5_HASH_WORDS] = { 1274 cpu_to_le32(0x67452301), 1275 cpu_to_le32(0xefcdab89), 1276 cpu_to_le32(0x98badcfe), 1277 cpu_to_le32(0x10325476), 1278}; 1279static const char sha1_zero[SHA1_DIGEST_SIZE] = { 1280 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 1281 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 1282 0x07, 0x09 1283}; 1284static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { 1285 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 1286}; 1287static const char sha256_zero[SHA256_DIGEST_SIZE] = { 1288 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 1289 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 1290 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 1291 0x1b, 0x78, 0x52, 0xb8, 0x55 1292}; 1293static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { 1294 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 1295 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, 1296}; 1297static const char sha224_zero[SHA224_DIGEST_SIZE] = { 1298 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, 1299 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, 1300 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, 1301 0x2f 1302}; 1303static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { 1304 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 1305 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, 1306}; 1307 1308static const struct n2_hash_tmpl hash_tmpls[] = { 1309 { .name = "md5", 1310 .hash_zero = md5_zero, 1311 .hash_init = md5_init, 1312 .auth_type = AUTH_TYPE_MD5, 1313 .hmac_type = AUTH_TYPE_HMAC_MD5, 1314 .hw_op_hashsz = MD5_DIGEST_SIZE, 1315 .digest_size = MD5_DIGEST_SIZE, 1316 .block_size = MD5_HMAC_BLOCK_SIZE }, 1317 { .name = "sha1", 1318 .hash_zero = sha1_zero, 1319 .hash_init = sha1_init, 1320 .auth_type = AUTH_TYPE_SHA1, 1321 .hmac_type = AUTH_TYPE_HMAC_SHA1, 1322 .hw_op_hashsz = SHA1_DIGEST_SIZE, 1323 .digest_size = SHA1_DIGEST_SIZE, 1324 .block_size = SHA1_BLOCK_SIZE }, 1325 { .name = "sha256", 1326 .hash_zero = sha256_zero, 1327 .hash_init = sha256_init, 1328 .auth_type = AUTH_TYPE_SHA256, 1329 .hmac_type = AUTH_TYPE_HMAC_SHA256, 1330 .hw_op_hashsz = SHA256_DIGEST_SIZE, 1331 .digest_size = SHA256_DIGEST_SIZE, 1332 .block_size = SHA256_BLOCK_SIZE }, 1333 { .name = "sha224", 1334 .hash_zero = sha224_zero, 1335 .hash_init = sha224_init, 1336 .auth_type = AUTH_TYPE_SHA256, 1337 .hmac_type = AUTH_TYPE_RESERVED, 1338 .hw_op_hashsz = SHA256_DIGEST_SIZE, 1339 .digest_size = SHA224_DIGEST_SIZE, 1340 .block_size = SHA224_BLOCK_SIZE }, 1341}; 1342#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) 1343 1344static LIST_HEAD(ahash_algs); 1345static LIST_HEAD(hmac_algs); 1346 1347static int algs_registered; 1348 1349static void __n2_unregister_algs(void) 1350{ 1351 struct n2_cipher_alg *cipher, *cipher_tmp; 1352 struct n2_ahash_alg *alg, *alg_tmp; 1353 struct n2_hmac_alg *hmac, *hmac_tmp; 1354 1355 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { 1356 crypto_unregister_alg(&cipher->alg); 1357 list_del(&cipher->entry); 1358 kfree(cipher); 1359 } 1360 list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { 1361 crypto_unregister_ahash(&hmac->derived.alg); 1362 list_del(&hmac->derived.entry); 1363 kfree(hmac); 1364 } 1365 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { 1366 crypto_unregister_ahash(&alg->alg); 1367 list_del(&alg->entry); 1368 kfree(alg); 1369 } 1370} 1371 1372static int n2_cipher_cra_init(struct crypto_tfm *tfm) 1373{ 1374 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); 1375 return 0; 1376} 1377 1378static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) 1379{ 1380 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1381 struct crypto_alg *alg; 1382 int err; 1383 1384 if (!p) 1385 return -ENOMEM; 1386 1387 alg = &p->alg; 1388 1389 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 1390 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); 1391 alg->cra_priority = N2_CRA_PRIORITY; 1392 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 1393 alg->cra_blocksize = tmpl->block_size; 1394 p->enc_type = tmpl->enc_type; 1395 alg->cra_ctxsize = sizeof(struct n2_cipher_context); 1396 alg->cra_type = &crypto_ablkcipher_type; 1397 alg->cra_u.ablkcipher = tmpl->ablkcipher; 1398 alg->cra_init = n2_cipher_cra_init; 1399 alg->cra_module = THIS_MODULE; 1400 1401 list_add(&p->entry, &cipher_algs); 1402 err = crypto_register_alg(alg); 1403 if (err) { 1404 pr_err("%s alg registration failed\n", alg->cra_name); 1405 list_del(&p->entry); 1406 kfree(p); 1407 } else { 1408 pr_info("%s alg registered\n", alg->cra_name); 1409 } 1410 return err; 1411} 1412 1413static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) 1414{ 1415 struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1416 struct ahash_alg *ahash; 1417 struct crypto_alg *base; 1418 int err; 1419 1420 if (!p) 1421 return -ENOMEM; 1422 1423 p->child_alg = n2ahash->alg.halg.base.cra_name; 1424 memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); 1425 INIT_LIST_HEAD(&p->derived.entry); 1426 1427 ahash = &p->derived.alg; 1428 ahash->digest = n2_hmac_async_digest; 1429 ahash->setkey = n2_hmac_async_setkey; 1430 1431 base = &ahash->halg.base; 1432 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); 1433 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); 1434 1435 base->cra_ctxsize = sizeof(struct n2_hmac_ctx); 1436 base->cra_init = n2_hmac_cra_init; 1437 base->cra_exit = n2_hmac_cra_exit; 1438 1439 list_add(&p->derived.entry, &hmac_algs); 1440 err = crypto_register_ahash(ahash); 1441 if (err) { 1442 pr_err("%s alg registration failed\n", base->cra_name); 1443 list_del(&p->derived.entry); 1444 kfree(p); 1445 } else { 1446 pr_info("%s alg registered\n", base->cra_name); 1447 } 1448 return err; 1449} 1450 1451static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) 1452{ 1453 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1454 struct hash_alg_common *halg; 1455 struct crypto_alg *base; 1456 struct ahash_alg *ahash; 1457 int err; 1458 1459 if (!p) 1460 return -ENOMEM; 1461 1462 p->hash_zero = tmpl->hash_zero; 1463 p->hash_init = tmpl->hash_init; 1464 p->auth_type = tmpl->auth_type; 1465 p->hmac_type = tmpl->hmac_type; 1466 p->hw_op_hashsz = tmpl->hw_op_hashsz; 1467 p->digest_size = tmpl->digest_size; 1468 1469 ahash = &p->alg; 1470 ahash->init = n2_hash_async_init; 1471 ahash->update = n2_hash_async_update; 1472 ahash->final = n2_hash_async_final; 1473 ahash->finup = n2_hash_async_finup; 1474 ahash->digest = n2_hash_async_digest; 1475 1476 halg = &ahash->halg; 1477 halg->digestsize = tmpl->digest_size; 1478 1479 base = &halg->base; 1480 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 1481 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); 1482 base->cra_priority = N2_CRA_PRIORITY; 1483 base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK; 1484 base->cra_blocksize = tmpl->block_size; 1485 base->cra_ctxsize = sizeof(struct n2_hash_ctx); 1486 base->cra_module = THIS_MODULE; 1487 base->cra_init = n2_hash_cra_init; 1488 base->cra_exit = n2_hash_cra_exit; 1489 1490 list_add(&p->entry, &ahash_algs); 1491 err = crypto_register_ahash(ahash); 1492 if (err) { 1493 pr_err("%s alg registration failed\n", base->cra_name); 1494 list_del(&p->entry); 1495 kfree(p); 1496 } else { 1497 pr_info("%s alg registered\n", base->cra_name); 1498 } 1499 if (!err && p->hmac_type != AUTH_TYPE_RESERVED) 1500 err = __n2_register_one_hmac(p); 1501 return err; 1502} 1503 1504static int __devinit n2_register_algs(void) 1505{ 1506 int i, err = 0; 1507 1508 mutex_lock(&spu_lock); 1509 if (algs_registered++) 1510 goto out; 1511 1512 for (i = 0; i < NUM_HASH_TMPLS; i++) { 1513 err = __n2_register_one_ahash(&hash_tmpls[i]); 1514 if (err) { 1515 __n2_unregister_algs(); 1516 goto out; 1517 } 1518 } 1519 for (i = 0; i < NUM_CIPHER_TMPLS; i++) { 1520 err = __n2_register_one_cipher(&cipher_tmpls[i]); 1521 if (err) { 1522 __n2_unregister_algs(); 1523 goto out; 1524 } 1525 } 1526 1527out: 1528 mutex_unlock(&spu_lock); 1529 return err; 1530} 1531 1532static void __exit n2_unregister_algs(void) 1533{ 1534 mutex_lock(&spu_lock); 1535 if (!--algs_registered) 1536 __n2_unregister_algs(); 1537 mutex_unlock(&spu_lock); 1538} 1539 1540/* To map CWQ queues to interrupt sources, the hypervisor API provides 1541 * a devino. This isn't very useful to us because all of the 1542 * interrupts listed in the device_node have been translated to 1543 * Linux virtual IRQ cookie numbers. 1544 * 1545 * So we have to back-translate, going through the 'intr' and 'ino' 1546 * property tables of the n2cp MDESC node, matching it with the OF 1547 * 'interrupts' property entries, in order to to figure out which 1548 * devino goes to which already-translated IRQ. 1549 */ 1550static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, 1551 unsigned long dev_ino) 1552{ 1553 const unsigned int *dev_intrs; 1554 unsigned int intr; 1555 int i; 1556 1557 for (i = 0; i < ip->num_intrs; i++) { 1558 if (ip->ino_table[i].ino == dev_ino) 1559 break; 1560 } 1561 if (i == ip->num_intrs) 1562 return -ENODEV; 1563 1564 intr = ip->ino_table[i].intr; 1565 1566 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); 1567 if (!dev_intrs) 1568 return -ENODEV; 1569 1570 for (i = 0; i < dev->archdata.num_irqs; i++) { 1571 if (dev_intrs[i] == intr) 1572 return i; 1573 } 1574 1575 return -ENODEV; 1576} 1577 1578static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, 1579 const char *irq_name, struct spu_queue *p, 1580 irq_handler_t handler) 1581{ 1582 unsigned long herr; 1583 int index; 1584 1585 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); 1586 if (herr) 1587 return -EINVAL; 1588 1589 index = find_devino_index(dev, ip, p->devino); 1590 if (index < 0) 1591 return index; 1592 1593 p->irq = dev->archdata.irqs[index]; 1594 1595 sprintf(p->irq_name, "%s-%d", irq_name, index); 1596 1597 return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM, 1598 p->irq_name, p); 1599} 1600 1601static struct kmem_cache *queue_cache[2]; 1602 1603static void *new_queue(unsigned long q_type) 1604{ 1605 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); 1606} 1607 1608static void free_queue(void *p, unsigned long q_type) 1609{ 1610 return kmem_cache_free(queue_cache[q_type - 1], p); 1611} 1612 1613static int queue_cache_init(void) 1614{ 1615 if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 1616 queue_cache[HV_NCS_QTYPE_MAU - 1] = 1617 kmem_cache_create("mau_queue", 1618 (MAU_NUM_ENTRIES * 1619 MAU_ENTRY_SIZE), 1620 MAU_ENTRY_SIZE, 0, NULL); 1621 if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 1622 return -ENOMEM; 1623 1624 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) 1625 queue_cache[HV_NCS_QTYPE_CWQ - 1] = 1626 kmem_cache_create("cwq_queue", 1627 (CWQ_NUM_ENTRIES * 1628 CWQ_ENTRY_SIZE), 1629 CWQ_ENTRY_SIZE, 0, NULL); 1630 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { 1631 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 1632 return -ENOMEM; 1633 } 1634 return 0; 1635} 1636 1637static void queue_cache_destroy(void) 1638{ 1639 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 1640 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); 1641} 1642 1643static int spu_queue_register(struct spu_queue *p, unsigned long q_type) 1644{ 1645 cpumask_var_t old_allowed; 1646 unsigned long hv_ret; 1647 1648 if (cpumask_empty(&p->sharing)) 1649 return -EINVAL; 1650 1651 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) 1652 return -ENOMEM; 1653 1654 cpumask_copy(old_allowed, ¤t->cpus_allowed); 1655 1656 set_cpus_allowed_ptr(current, &p->sharing); 1657 1658 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), 1659 CWQ_NUM_ENTRIES, &p->qhandle); 1660 if (!hv_ret) 1661 sun4v_ncs_sethead_marker(p->qhandle, 0); 1662 1663 set_cpus_allowed_ptr(current, old_allowed); 1664 1665 free_cpumask_var(old_allowed); 1666 1667 return (hv_ret ? -EINVAL : 0); 1668} 1669 1670static int spu_queue_setup(struct spu_queue *p) 1671{ 1672 int err; 1673 1674 p->q = new_queue(p->q_type); 1675 if (!p->q) 1676 return -ENOMEM; 1677 1678 err = spu_queue_register(p, p->q_type); 1679 if (err) { 1680 free_queue(p->q, p->q_type); 1681 p->q = NULL; 1682 } 1683 1684 return err; 1685} 1686 1687static void spu_queue_destroy(struct spu_queue *p) 1688{ 1689 unsigned long hv_ret; 1690 1691 if (!p->q) 1692 return; 1693 1694 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); 1695 1696 if (!hv_ret) 1697 free_queue(p->q, p->q_type); 1698} 1699 1700static void spu_list_destroy(struct list_head *list) 1701{ 1702 struct spu_queue *p, *n; 1703 1704 list_for_each_entry_safe(p, n, list, list) { 1705 int i; 1706 1707 for (i = 0; i < NR_CPUS; i++) { 1708 if (cpu_to_cwq[i] == p) 1709 cpu_to_cwq[i] = NULL; 1710 } 1711 1712 if (p->irq) { 1713 free_irq(p->irq, p); 1714 p->irq = 0; 1715 } 1716 spu_queue_destroy(p); 1717 list_del(&p->list); 1718 kfree(p); 1719 } 1720} 1721 1722/* Walk the backward arcs of a CWQ 'exec-unit' node, 1723 * gathering cpu membership information. 1724 */ 1725static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, 1726 struct platform_device *dev, 1727 u64 node, struct spu_queue *p, 1728 struct spu_queue **table) 1729{ 1730 u64 arc; 1731 1732 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { 1733 u64 tgt = mdesc_arc_target(mdesc, arc); 1734 const char *name = mdesc_node_name(mdesc, tgt); 1735 const u64 *id; 1736 1737 if (strcmp(name, "cpu")) 1738 continue; 1739 id = mdesc_get_property(mdesc, tgt, "id", NULL); 1740 if (table[*id] != NULL) { 1741 dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", 1742 dev->dev.of_node->full_name); 1743 return -EINVAL; 1744 } 1745 cpu_set(*id, p->sharing); 1746 table[*id] = p; 1747 } 1748 return 0; 1749} 1750 1751/* Process an 'exec-unit' MDESC node of type 'cwq'. */ 1752static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, 1753 struct platform_device *dev, struct mdesc_handle *mdesc, 1754 u64 node, const char *iname, unsigned long q_type, 1755 irq_handler_t handler, struct spu_queue **table) 1756{ 1757 struct spu_queue *p; 1758 int err; 1759 1760 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); 1761 if (!p) { 1762 dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", 1763 dev->dev.of_node->full_name); 1764 return -ENOMEM; 1765 } 1766 1767 cpus_clear(p->sharing); 1768 spin_lock_init(&p->lock); 1769 p->q_type = q_type; 1770 INIT_LIST_HEAD(&p->jobs); 1771 list_add(&p->list, list); 1772 1773 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); 1774 if (err) 1775 return err; 1776 1777 err = spu_queue_setup(p); 1778 if (err) 1779 return err; 1780 1781 return spu_map_ino(dev, ip, iname, p, handler); 1782} 1783 1784static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev, 1785 struct spu_mdesc_info *ip, struct list_head *list, 1786 const char *exec_name, unsigned long q_type, 1787 irq_handler_t handler, struct spu_queue **table) 1788{ 1789 int err = 0; 1790 u64 node; 1791 1792 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { 1793 const char *type; 1794 1795 type = mdesc_get_property(mdesc, node, "type", NULL); 1796 if (!type || strcmp(type, exec_name)) 1797 continue; 1798 1799 err = handle_exec_unit(ip, list, dev, mdesc, node, 1800 exec_name, q_type, handler, table); 1801 if (err) { 1802 spu_list_destroy(list); 1803 break; 1804 } 1805 } 1806 1807 return err; 1808} 1809 1810static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, 1811 struct spu_mdesc_info *ip) 1812{ 1813 const u64 *intr, *ino; 1814 int intr_len, ino_len; 1815 int i; 1816 1817 intr = mdesc_get_property(mdesc, node, "intr", &intr_len); 1818 if (!intr) 1819 return -ENODEV; 1820 1821 ino = mdesc_get_property(mdesc, node, "ino", &ino_len); 1822 if (!intr) 1823 return -ENODEV; 1824 1825 if (intr_len != ino_len) 1826 return -EINVAL; 1827 1828 ip->num_intrs = intr_len / sizeof(u64); 1829 ip->ino_table = kzalloc((sizeof(struct ino_blob) * 1830 ip->num_intrs), 1831 GFP_KERNEL); 1832 if (!ip->ino_table) 1833 return -ENOMEM; 1834 1835 for (i = 0; i < ip->num_intrs; i++) { 1836 struct ino_blob *b = &ip->ino_table[i]; 1837 b->intr = intr[i]; 1838 b->ino = ino[i]; 1839 } 1840 1841 return 0; 1842} 1843 1844static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, 1845 struct platform_device *dev, 1846 struct spu_mdesc_info *ip, 1847 const char *node_name) 1848{ 1849 const unsigned int *reg; 1850 u64 node; 1851 1852 reg = of_get_property(dev->dev.of_node, "reg", NULL); 1853 if (!reg) 1854 return -ENODEV; 1855 1856 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { 1857 const char *name; 1858 const u64 *chdl; 1859 1860 name = mdesc_get_property(mdesc, node, "name", NULL); 1861 if (!name || strcmp(name, node_name)) 1862 continue; 1863 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); 1864 if (!chdl || (*chdl != *reg)) 1865 continue; 1866 ip->cfg_handle = *chdl; 1867 return get_irq_props(mdesc, node, ip); 1868 } 1869 1870 return -ENODEV; 1871} 1872 1873static unsigned long n2_spu_hvapi_major; 1874static unsigned long n2_spu_hvapi_minor; 1875 1876static int __devinit n2_spu_hvapi_register(void) 1877{ 1878 int err; 1879 1880 n2_spu_hvapi_major = 2; 1881 n2_spu_hvapi_minor = 0; 1882 1883 err = sun4v_hvapi_register(HV_GRP_NCS, 1884 n2_spu_hvapi_major, 1885 &n2_spu_hvapi_minor); 1886 1887 if (!err) 1888 pr_info("Registered NCS HVAPI version %lu.%lu\n", 1889 n2_spu_hvapi_major, 1890 n2_spu_hvapi_minor); 1891 1892 return err; 1893} 1894 1895static void n2_spu_hvapi_unregister(void) 1896{ 1897 sun4v_hvapi_unregister(HV_GRP_NCS); 1898} 1899 1900static int global_ref; 1901 1902static int __devinit grab_global_resources(void) 1903{ 1904 int err = 0; 1905 1906 mutex_lock(&spu_lock); 1907 1908 if (global_ref++) 1909 goto out; 1910 1911 err = n2_spu_hvapi_register(); 1912 if (err) 1913 goto out; 1914 1915 err = queue_cache_init(); 1916 if (err) 1917 goto out_hvapi_release; 1918 1919 err = -ENOMEM; 1920 cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 1921 GFP_KERNEL); 1922 if (!cpu_to_cwq) 1923 goto out_queue_cache_destroy; 1924 1925 cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 1926 GFP_KERNEL); 1927 if (!cpu_to_mau) 1928 goto out_free_cwq_table; 1929 1930 err = 0; 1931 1932out: 1933 if (err) 1934 global_ref--; 1935 mutex_unlock(&spu_lock); 1936 return err; 1937 1938out_free_cwq_table: 1939 kfree(cpu_to_cwq); 1940 cpu_to_cwq = NULL; 1941 1942out_queue_cache_destroy: 1943 queue_cache_destroy(); 1944 1945out_hvapi_release: 1946 n2_spu_hvapi_unregister(); 1947 goto out; 1948} 1949 1950static void release_global_resources(void) 1951{ 1952 mutex_lock(&spu_lock); 1953 if (!--global_ref) { 1954 kfree(cpu_to_cwq); 1955 cpu_to_cwq = NULL; 1956 1957 kfree(cpu_to_mau); 1958 cpu_to_mau = NULL; 1959 1960 queue_cache_destroy(); 1961 n2_spu_hvapi_unregister(); 1962 } 1963 mutex_unlock(&spu_lock); 1964} 1965 1966static struct n2_crypto * __devinit alloc_n2cp(void) 1967{ 1968 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); 1969 1970 if (np) 1971 INIT_LIST_HEAD(&np->cwq_list); 1972 1973 return np; 1974} 1975 1976static void free_n2cp(struct n2_crypto *np) 1977{ 1978 if (np->cwq_info.ino_table) { 1979 kfree(np->cwq_info.ino_table); 1980 np->cwq_info.ino_table = NULL; 1981 } 1982 1983 kfree(np); 1984} 1985 1986static void __devinit n2_spu_driver_version(void) 1987{ 1988 static int n2_spu_version_printed; 1989 1990 if (n2_spu_version_printed++ == 0) 1991 pr_info("%s", version); 1992} 1993 1994static int __devinit n2_crypto_probe(struct platform_device *dev, 1995 const struct of_device_id *match) 1996{ 1997 struct mdesc_handle *mdesc; 1998 const char *full_name; 1999 struct n2_crypto *np; 2000 int err; 2001 2002 n2_spu_driver_version(); 2003 2004 full_name = dev->dev.of_node->full_name; 2005 pr_info("Found N2CP at %s\n", full_name); 2006 2007 np = alloc_n2cp(); 2008 if (!np) { 2009 dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n", 2010 full_name); 2011 return -ENOMEM; 2012 } 2013 2014 err = grab_global_resources(); 2015 if (err) { 2016 dev_err(&dev->dev, "%s: Unable to grab " 2017 "global resources.\n", full_name); 2018 goto out_free_n2cp; 2019 } 2020 2021 mdesc = mdesc_grab(); 2022 2023 if (!mdesc) { 2024 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", 2025 full_name); 2026 err = -ENODEV; 2027 goto out_free_global; 2028 } 2029 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); 2030 if (err) { 2031 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", 2032 full_name); 2033 mdesc_release(mdesc); 2034 goto out_free_global; 2035 } 2036 2037 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list, 2038 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr, 2039 cpu_to_cwq); 2040 mdesc_release(mdesc); 2041 2042 if (err) { 2043 dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n", 2044 full_name); 2045 goto out_free_global; 2046 } 2047 2048 err = n2_register_algs(); 2049 if (err) { 2050 dev_err(&dev->dev, "%s: Unable to register algorithms.\n", 2051 full_name); 2052 goto out_free_spu_list; 2053 } 2054 2055 dev_set_drvdata(&dev->dev, np); 2056 2057 return 0; 2058 2059out_free_spu_list: 2060 spu_list_destroy(&np->cwq_list); 2061 2062out_free_global: 2063 release_global_resources(); 2064 2065out_free_n2cp: 2066 free_n2cp(np); 2067 2068 return err; 2069} 2070 2071static int __devexit n2_crypto_remove(struct platform_device *dev) 2072{ 2073 struct n2_crypto *np = dev_get_drvdata(&dev->dev); 2074 2075 n2_unregister_algs(); 2076 2077 spu_list_destroy(&np->cwq_list); 2078 2079 release_global_resources(); 2080 2081 free_n2cp(np); 2082 2083 return 0; 2084} 2085 2086static struct n2_mau * __devinit alloc_ncp(void) 2087{ 2088 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); 2089 2090 if (mp) 2091 INIT_LIST_HEAD(&mp->mau_list); 2092 2093 return mp; 2094} 2095 2096static void free_ncp(struct n2_mau *mp) 2097{ 2098 if (mp->mau_info.ino_table) { 2099 kfree(mp->mau_info.ino_table); 2100 mp->mau_info.ino_table = NULL; 2101 } 2102 2103 kfree(mp); 2104} 2105 2106static int __devinit n2_mau_probe(struct platform_device *dev, 2107 const struct of_device_id *match) 2108{ 2109 struct mdesc_handle *mdesc; 2110 const char *full_name; 2111 struct n2_mau *mp; 2112 int err; 2113 2114 n2_spu_driver_version(); 2115 2116 full_name = dev->dev.of_node->full_name; 2117 pr_info("Found NCP at %s\n", full_name); 2118 2119 mp = alloc_ncp(); 2120 if (!mp) { 2121 dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", 2122 full_name); 2123 return -ENOMEM; 2124 } 2125 2126 err = grab_global_resources(); 2127 if (err) { 2128 dev_err(&dev->dev, "%s: Unable to grab " 2129 "global resources.\n", full_name); 2130 goto out_free_ncp; 2131 } 2132 2133 mdesc = mdesc_grab(); 2134 2135 if (!mdesc) { 2136 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", 2137 full_name); 2138 err = -ENODEV; 2139 goto out_free_global; 2140 } 2141 2142 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); 2143 if (err) { 2144 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", 2145 full_name); 2146 mdesc_release(mdesc); 2147 goto out_free_global; 2148 } 2149 2150 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, 2151 "mau", HV_NCS_QTYPE_MAU, mau_intr, 2152 cpu_to_mau); 2153 mdesc_release(mdesc); 2154 2155 if (err) { 2156 dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", 2157 full_name); 2158 goto out_free_global; 2159 } 2160 2161 dev_set_drvdata(&dev->dev, mp); 2162 2163 return 0; 2164 2165out_free_global: 2166 release_global_resources(); 2167 2168out_free_ncp: 2169 free_ncp(mp); 2170 2171 return err; 2172} 2173 2174static int __devexit n2_mau_remove(struct platform_device *dev) 2175{ 2176 struct n2_mau *mp = dev_get_drvdata(&dev->dev); 2177 2178 spu_list_destroy(&mp->mau_list); 2179 2180 release_global_resources(); 2181 2182 free_ncp(mp); 2183 2184 return 0; 2185} 2186 2187static struct of_device_id n2_crypto_match[] = { 2188 { 2189 .name = "n2cp", 2190 .compatible = "SUNW,n2-cwq", 2191 }, 2192 { 2193 .name = "n2cp", 2194 .compatible = "SUNW,vf-cwq", 2195 }, 2196 {}, 2197}; 2198 2199MODULE_DEVICE_TABLE(of, n2_crypto_match); 2200 2201static struct of_platform_driver n2_crypto_driver = { 2202 .driver = { 2203 .name = "n2cp", 2204 .owner = THIS_MODULE, 2205 .of_match_table = n2_crypto_match, 2206 }, 2207 .probe = n2_crypto_probe, 2208 .remove = __devexit_p(n2_crypto_remove), 2209}; 2210 2211static struct of_device_id n2_mau_match[] = { 2212 { 2213 .name = "ncp", 2214 .compatible = "SUNW,n2-mau", 2215 }, 2216 { 2217 .name = "ncp", 2218 .compatible = "SUNW,vf-mau", 2219 }, 2220 {}, 2221}; 2222 2223MODULE_DEVICE_TABLE(of, n2_mau_match); 2224 2225static struct of_platform_driver n2_mau_driver = { 2226 .driver = { 2227 .name = "ncp", 2228 .owner = THIS_MODULE, 2229 .of_match_table = n2_mau_match, 2230 }, 2231 .probe = n2_mau_probe, 2232 .remove = __devexit_p(n2_mau_remove), 2233}; 2234 2235static int __init n2_init(void) 2236{ 2237 int err = of_register_platform_driver(&n2_crypto_driver); 2238 2239 if (!err) { 2240 err = of_register_platform_driver(&n2_mau_driver); 2241 if (err) 2242 of_unregister_platform_driver(&n2_crypto_driver); 2243 } 2244 return err; 2245} 2246 2247static void __exit n2_exit(void) 2248{ 2249 of_unregister_platform_driver(&n2_mau_driver); 2250 of_unregister_platform_driver(&n2_crypto_driver); 2251} 2252 2253module_init(n2_init); 2254module_exit(n2_exit); 2255