1/*
2 * Copyright 2017-2021 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the OpenSSL license (the "License").  You may not use
5 * this file except in compliance with the License.  You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10#include "e_os.h"
11#include <string.h>
12#include <sys/types.h>
13#include <sys/stat.h>
14#include <fcntl.h>
15#include <sys/ioctl.h>
16#include <unistd.h>
17#include <assert.h>
18
19#include <openssl/evp.h>
20#include <openssl/err.h>
21#include <openssl/engine.h>
22#include <openssl/objects.h>
23#include <crypto/cryptodev.h>
24
25#include "crypto/engine.h"
26
27#if CRYPTO_ALGORITHM_MIN < CRYPTO_ALGORITHM_MAX
28# define CHECK_BSD_STYLE_MACROS
29#endif
30
31/*
32 * ONE global file descriptor for all sessions.  This allows operations
33 * such as digest session data copying (see digest_copy()), but is also
34 * saner...  why re-open /dev/crypto for every session?
35 */
36static int cfd;
37
38static int clean_devcrypto_session(struct session_op *sess) {
39    if (ioctl(cfd, CIOCFSESSION, &sess->ses) < 0) {
40        SYSerr(SYS_F_IOCTL, errno);
41        return 0;
42    }
43    memset(sess, 0, sizeof(struct session_op));
44    return 1;
45}
46
47/******************************************************************************
48 *
49 * Ciphers
50 *
51 * Because they all do the same basic operation, we have only one set of
52 * method functions for them all to share, and a mapping table between
53 * NIDs and cryptodev IDs, with all the necessary size data.
54 *
55 *****/
56
/* Per-EVP_CIPHER_CTX state kept in the EVP cipher-data slot. */
struct cipher_ctx {
    struct session_op sess;      /* open cryptodev session for this cipher */
    int op;                      /* COP_ENCRYPT or COP_DECRYPT */
    unsigned long mode;          /* EVP_CIPH_*_MODE */

    /* to handle ctr mode being a stream cipher */
    unsigned char partial[EVP_MAX_BLOCK_LENGTH]; /* cached keystream block */
    unsigned int blocksize, num; /* num: offset of next unused keystream byte */
};
66
/*
 * Map each supported OpenSSL cipher NID to its cryptodev algorithm id,
 * together with the size and mode information needed to build an
 * EVP_CIPHER method for it.  Entries are compiled in only when both
 * OpenSSL and the local cryptodev headers support the algorithm.
 */
static const struct cipher_data_st {
    int nid;                     /* OpenSSL cipher NID */
    int blocksize;               /* block size in bytes (1 = stream cipher) */
    int keylen;                  /* key length in bytes */
    int ivlen;                   /* IV length in bytes (0 = no IV) */
    int flags;                   /* EVP_CIPH_*_MODE */
    int devcryptoid;             /* cryptodev CRYPTO_* identifier */
} cipher_data[] = {
#ifndef OPENSSL_NO_DES
    { NID_des_cbc, 8, 8, 8, EVP_CIPH_CBC_MODE, CRYPTO_DES_CBC },
    { NID_des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, CRYPTO_3DES_CBC },
#endif
#ifndef OPENSSL_NO_BF
    { NID_bf_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_BLF_CBC },
#endif
#ifndef OPENSSL_NO_CAST
    { NID_cast5_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_CAST_CBC },
#endif
    { NID_aes_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
    { NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
    { NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
#ifndef OPENSSL_NO_RC4
    { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_CTR)
    { NID_aes_128_ctr, 16, 128 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
    { NID_aes_192_ctr, 16, 192 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
    { NID_aes_256_ctr, 16, 256 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
#endif
#if 0                            /* Not yet supported */
    { NID_aes_128_xts, 16, 128 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
    { NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
    { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
    { NID_aes_192_ecb, 16, 192 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
    { NID_aes_256_ecb, 16, 256 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
#endif
#if 0                            /* Not yet supported */
    { NID_aes_128_gcm, 16, 128 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
    { NID_aes_192_gcm, 16, 192 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
    { NID_aes_256_gcm, 16, 256 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
#endif
#ifndef OPENSSL_NO_CAMELLIA
    { NID_camellia_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
    { NID_camellia_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
    { NID_camellia_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE,
      CRYPTO_CAMELLIA_CBC },
#endif
};
119
120static size_t get_cipher_data_index(int nid)
121{
122    size_t i;
123
124    for (i = 0; i < OSSL_NELEM(cipher_data); i++)
125        if (nid == cipher_data[i].nid)
126            return i;
127
128    /*
129     * Code further down must make sure that only NIDs in the table above
130     * are used.  If any other NID reaches this function, there's a grave
131     * coding error further down.
132     */
133    assert("Code that never should be reached" == NULL);
134    return -1;
135}
136
137static const struct cipher_data_st *get_cipher_data(int nid)
138{
139    return &cipher_data[get_cipher_data_index(nid)];
140}
141
142/*
143 * Following are the three necessary functions to map OpenSSL functionality
144 * with cryptodev.
145 */
146
147static int cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
148                       const unsigned char *iv, int enc)
149{
150    struct cipher_ctx *cipher_ctx =
151        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
152    const struct cipher_data_st *cipher_d =
153        get_cipher_data(EVP_CIPHER_CTX_nid(ctx));
154
155    /* cleanup a previous session */
156    if (cipher_ctx->sess.ses != 0 &&
157        clean_devcrypto_session(&cipher_ctx->sess) == 0)
158        return 0;
159
160    cipher_ctx->sess.cipher = cipher_d->devcryptoid;
161    cipher_ctx->sess.keylen = cipher_d->keylen;
162    cipher_ctx->sess.key = (void *)key;
163    cipher_ctx->op = enc ? COP_ENCRYPT : COP_DECRYPT;
164    cipher_ctx->mode = cipher_d->flags & EVP_CIPH_MODE;
165    cipher_ctx->blocksize = cipher_d->blocksize;
166    if (ioctl(cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) {
167        SYSerr(SYS_F_IOCTL, errno);
168        return 0;
169    }
170
171    return 1;
172}
173
/*
 * EVP do_cipher callback: push one encrypt/decrypt operation of |inl|
 * bytes to the kernel via CIOCCRYPT.
 *
 * EVP requires the IV stored in |ctx| to be updated after each call: for
 * CBC it becomes the last ciphertext block, for CTR the counter advanced
 * by the number of blocks processed.  Where the cryptodev implementation
 * defines COP_FLAG_WRITE_IV the kernel writes the new IV back for us;
 * otherwise (BSD-style APIs) we reconstruct it by hand below.
 */
static int cipher_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t inl)
{
    struct cipher_ctx *cipher_ctx =
        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
    struct crypt_op cryp;
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
#if !defined(COP_FLAG_WRITE_IV)
    unsigned char saved_iv[EVP_MAX_IV_LENGTH];
    const unsigned char *ivptr;
    size_t nblocks, ivlen;
#endif

    memset(&cryp, 0, sizeof(cryp));
    cryp.ses = cipher_ctx->sess.ses;
    cryp.len = inl;
    cryp.src = (void *)in;
    cryp.dst = (void *)out;
    cryp.iv = (void *)iv;
    cryp.op = cipher_ctx->op;
#if !defined(COP_FLAG_WRITE_IV)
    cryp.flags = 0;

    ivlen = EVP_CIPHER_CTX_iv_length(ctx);
    if (ivlen > 0)
        switch (cipher_ctx->mode) {
        case EVP_CIPH_CBC_MODE:
            assert(inl >= ivlen);
            if (!EVP_CIPHER_CTX_encrypting(ctx)) {
                /*
                 * When decrypting, the next IV is the last *input* block;
                 * save it before the (possibly in-place) operation
                 * overwrites it.
                 */
                ivptr = in + inl - ivlen;
                memcpy(saved_iv, ivptr, ivlen);
            }
            break;

        case EVP_CIPH_CTR_MODE:
            break;

        default: /* should not happen */
            return 0;
        }
#else
    cryp.flags = COP_FLAG_WRITE_IV;
#endif

    if (ioctl(cfd, CIOCCRYPT, &cryp) < 0) {
        SYSerr(SYS_F_IOCTL, errno);
        return 0;
    }

#if !defined(COP_FLAG_WRITE_IV)
    /* Store the updated IV back into the EVP context by hand. */
    if (ivlen > 0)
        switch (cipher_ctx->mode) {
        case EVP_CIPH_CBC_MODE:
            assert(inl >= ivlen);
            if (EVP_CIPHER_CTX_encrypting(ctx))
                ivptr = out + inl - ivlen;  /* last ciphertext block */
            else
                ivptr = saved_iv;           /* saved before the ioctl */

            memcpy(iv, ivptr, ivlen);
            break;

        case EVP_CIPH_CTR_MODE:
            /* Big-endian increment of the counter by the block count. */
            nblocks = (inl + cipher_ctx->blocksize - 1)
                      / cipher_ctx->blocksize;
            do {
                ivlen--;
                nblocks += iv[ivlen];
                iv[ivlen] = (uint8_t) nblocks;
                nblocks >>= 8;
            } while (ivlen);
            break;

        default: /* should not happen */
            return 0;
        }
#endif

    return 1;
}
254
255static int ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
256                         const unsigned char *in, size_t inl)
257{
258    struct cipher_ctx *cipher_ctx =
259        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
260    size_t nblocks, len;
261
262    /* initial partial block */
263    while (cipher_ctx->num && inl) {
264        (*out++) = *(in++) ^ cipher_ctx->partial[cipher_ctx->num];
265        --inl;
266        cipher_ctx->num = (cipher_ctx->num + 1) % cipher_ctx->blocksize;
267    }
268
269    /* full blocks */
270    if (inl > (unsigned int) cipher_ctx->blocksize) {
271        nblocks = inl/cipher_ctx->blocksize;
272        len = nblocks * cipher_ctx->blocksize;
273        if (cipher_do_cipher(ctx, out, in, len) < 1)
274            return 0;
275        inl -= len;
276        out += len;
277        in += len;
278    }
279
280    /* final partial block */
281    if (inl) {
282        memset(cipher_ctx->partial, 0, cipher_ctx->blocksize);
283        if (cipher_do_cipher(ctx, cipher_ctx->partial, cipher_ctx->partial,
284            cipher_ctx->blocksize) < 1)
285            return 0;
286        while (inl--) {
287            out[cipher_ctx->num] = in[cipher_ctx->num]
288                                   ^ cipher_ctx->partial[cipher_ctx->num];
289            cipher_ctx->num++;
290        }
291    }
292
293    return 1;
294}
295
296static int cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int p1, void* p2)
297{
298    struct cipher_ctx *cipher_ctx =
299        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
300    EVP_CIPHER_CTX *to_ctx = (EVP_CIPHER_CTX *)p2;
301    struct cipher_ctx *to_cipher_ctx;
302
303    switch (type) {
304    case EVP_CTRL_COPY:
305        if (cipher_ctx == NULL)
306            return 1;
307        /* when copying the context, a new session needs to be initialized */
308        to_cipher_ctx =
309            (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(to_ctx);
310        memset(&to_cipher_ctx->sess, 0, sizeof(to_cipher_ctx->sess));
311        return cipher_init(to_ctx, cipher_ctx->sess.key, EVP_CIPHER_CTX_iv(ctx),
312                           (cipher_ctx->op == COP_ENCRYPT));
313
314    case EVP_CTRL_INIT:
315        memset(&cipher_ctx->sess, 0, sizeof(cipher_ctx->sess));
316        return 1;
317
318    default:
319        break;
320    }
321
322    return -1;
323}
324
325static int cipher_cleanup(EVP_CIPHER_CTX *ctx)
326{
327    struct cipher_ctx *cipher_ctx =
328        (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
329
330    return clean_devcrypto_session(&cipher_ctx->sess);
331}
332
/*
 * Registry of working cipher methods.  known_cipher_methods[] is indexed
 * exactly like cipher_data[] above; known_cipher_nids[] is a compacted
 * list holding only the NIDs that probed successfully, so it is NOT
 * necessarily indexed the same way.
 */
static int known_cipher_nids[OSSL_NELEM(cipher_data)];
static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };
341
/*
 * Probe the kernel for every entry in cipher_data[] and build an
 * EVP_CIPHER method for each algorithm that actually works, recording
 * the usable NIDs in known_cipher_nids[].  Called once at engine load.
 */
static void prepare_cipher_methods(void)
{
    size_t i;
    struct session_op sess;
    unsigned long cipher_mode;

    memset(&sess, 0, sizeof(sess));
    /* Dummy key material, long enough for any keylen in cipher_data[]. */
    sess.key = (void *)"01234567890123456789012345678901234567890123456789";

    for (i = 0, known_cipher_nids_amount = 0;
         i < OSSL_NELEM(cipher_data); i++) {

        /*
         * Check that the algo is really available by trying to open and close
         * a session.
         */
        sess.cipher = cipher_data[i].devcryptoid;
        sess.keylen = cipher_data[i].keylen;
        if (ioctl(cfd, CIOCGSESSION, &sess) < 0
            || ioctl(cfd, CIOCFSESSION, &sess.ses) < 0)
            continue;

        cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;

        /* CTR is presented to EVP as a stream cipher: blocksize 1. */
        if ((known_cipher_methods[i] =
                 EVP_CIPHER_meth_new(cipher_data[i].nid,
                                     cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
                                                    cipher_data[i].blocksize,
                                     cipher_data[i].keylen)) == NULL
            || !EVP_CIPHER_meth_set_iv_length(known_cipher_methods[i],
                                              cipher_data[i].ivlen)
            || !EVP_CIPHER_meth_set_flags(known_cipher_methods[i],
                                          cipher_data[i].flags
                                          | EVP_CIPH_CUSTOM_COPY
                                          | EVP_CIPH_CTRL_INIT
                                          | EVP_CIPH_FLAG_DEFAULT_ASN1)
            || !EVP_CIPHER_meth_set_init(known_cipher_methods[i], cipher_init)
            || !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
                                     cipher_mode == EVP_CIPH_CTR_MODE ?
                                              ctr_do_cipher :
                                              cipher_do_cipher)
            || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], cipher_ctrl)
            || !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
                                            cipher_cleanup)
            || !EVP_CIPHER_meth_set_impl_ctx_size(known_cipher_methods[i],
                                                  sizeof(struct cipher_ctx))) {
            /* Building the method failed part-way; discard it entirely. */
            EVP_CIPHER_meth_free(known_cipher_methods[i]);
            known_cipher_methods[i] = NULL;
        } else {
            known_cipher_nids[known_cipher_nids_amount++] =
                cipher_data[i].nid;
        }
    }
}
396
397static const EVP_CIPHER *get_cipher_method(int nid)
398{
399    size_t i = get_cipher_data_index(nid);
400
401    if (i == (size_t)-1)
402        return NULL;
403    return known_cipher_methods[i];
404}
405
/*
 * Report the list of NIDs for which a cipher method was registered.
 * Returns the count (-1 until prepare_cipher_methods() has run).
 */
static int get_cipher_nids(const int **nids)
{
    *nids = known_cipher_nids;
    return known_cipher_nids_amount;
}
411
412static void destroy_cipher_method(int nid)
413{
414    size_t i = get_cipher_data_index(nid);
415
416    EVP_CIPHER_meth_free(known_cipher_methods[i]);
417    known_cipher_methods[i] = NULL;
418}
419
420static void destroy_all_cipher_methods(void)
421{
422    size_t i;
423
424    for (i = 0; i < OSSL_NELEM(cipher_data); i++)
425        destroy_cipher_method(cipher_data[i].nid);
426}
427
428static int devcrypto_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
429                             const int **nids, int nid)
430{
431    if (cipher == NULL)
432        return get_cipher_nids(nids);
433
434    *cipher = get_cipher_method(nid);
435
436    return *cipher != NULL;
437}
438
439/*
440 * We only support digests if the cryptodev implementation supports multiple
441 * data updates and session copying.  Otherwise, we would be forced to maintain
442 * a cache, which is perilous if there's a lot of data coming in (if someone
443 * wants to checksum an OpenSSL tarball, for example).
444 */
445#if defined(CIOCCPHASH) && defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
446#define IMPLEMENT_DIGEST
447
448/******************************************************************************
449 *
450 * Digests
451 *
452 * Because they all do the same basic operation, we have only one set of
453 * method functions for them all to share, and a mapping table between
454 * NIDs and cryptodev IDs, with all the necessary size data.
455 *
456 *****/
457
/* Per-EVP_MD_CTX state kept in the EVP md-data slot. */
struct digest_ctx {
    struct session_op sess;  /* open cryptodev digest session */
    /* This signals that the init function was called, not that it succeeded. */
    int init_called;
};
463
/*
 * Map each supported OpenSSL digest NID to its cryptodev algorithm id,
 * together with the sizes needed to build an EVP_MD method for it.
 * Entries are compiled in only when both OpenSSL and the local cryptodev
 * headers support the algorithm.
 */
static const struct digest_data_st {
    int nid;                     /* OpenSSL digest NID */
    int blocksize;               /* input block size in bytes */
    int digestlen;               /* digest output length in bytes */
    int devcryptoid;             /* cryptodev CRYPTO_* identifier */
} digest_data[] = {
#ifndef OPENSSL_NO_MD5
    { NID_md5, /* MD5_CBLOCK */ 64, 16, CRYPTO_MD5 },
#endif
    { NID_sha1, SHA_CBLOCK, 20, CRYPTO_SHA1 },
#ifndef OPENSSL_NO_RMD160
# if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_RIPEMD160)
    { NID_ripemd160, /* RIPEMD160_CBLOCK */ 64, 20, CRYPTO_RIPEMD160 },
# endif
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_224)
    { NID_sha224, SHA256_CBLOCK, 224 / 8, CRYPTO_SHA2_224 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_256)
    { NID_sha256, SHA256_CBLOCK, 256 / 8, CRYPTO_SHA2_256 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_384)
    { NID_sha384, SHA512_CBLOCK, 384 / 8, CRYPTO_SHA2_384 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_512)
    { NID_sha512, SHA512_CBLOCK, 512 / 8, CRYPTO_SHA2_512 },
#endif
};
492
493static size_t get_digest_data_index(int nid)
494{
495    size_t i;
496
497    for (i = 0; i < OSSL_NELEM(digest_data); i++)
498        if (nid == digest_data[i].nid)
499            return i;
500
501    /*
502     * Code further down must make sure that only NIDs in the table above
503     * are used.  If any other NID reaches this function, there's a grave
504     * coding error further down.
505     */
506    assert("Code that never should be reached" == NULL);
507    return -1;
508}
509
510static const struct digest_data_st *get_digest_data(int nid)
511{
512    return &digest_data[get_digest_data_index(nid)];
513}
514
515/*
516 * Following are the four necessary functions to map OpenSSL functionality
517 * with cryptodev.
518 */
519
520static int digest_init(EVP_MD_CTX *ctx)
521{
522    struct digest_ctx *digest_ctx =
523        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
524    const struct digest_data_st *digest_d =
525        get_digest_data(EVP_MD_CTX_type(ctx));
526
527    digest_ctx->init_called = 1;
528
529    memset(&digest_ctx->sess, 0, sizeof(digest_ctx->sess));
530    digest_ctx->sess.mac = digest_d->devcryptoid;
531    if (ioctl(cfd, CIOCGSESSION, &digest_ctx->sess) < 0) {
532        SYSerr(SYS_F_IOCTL, errno);
533        return 0;
534    }
535
536    return 1;
537}
538
539static int digest_op(struct digest_ctx *ctx, const void *src, size_t srclen,
540                     void *res, unsigned int flags)
541{
542    struct crypt_op cryp;
543
544    memset(&cryp, 0, sizeof(cryp));
545    cryp.ses = ctx->sess.ses;
546    cryp.len = srclen;
547    cryp.src = (void *)src;
548    cryp.dst = NULL;
549    cryp.mac = res;
550    cryp.flags = flags;
551    return ioctl(cfd, CIOCCRYPT, &cryp);
552}
553
554static int digest_update(EVP_MD_CTX *ctx, const void *data, size_t count)
555{
556    struct digest_ctx *digest_ctx =
557        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
558
559    if (count == 0)
560        return 1;
561
562    if (digest_ctx == NULL)
563        return 0;
564
565    if (digest_op(digest_ctx, data, count, NULL, COP_FLAG_UPDATE) < 0) {
566        SYSerr(SYS_F_IOCTL, errno);
567        return 0;
568    }
569
570    return 1;
571}
572
573static int digest_final(EVP_MD_CTX *ctx, unsigned char *md)
574{
575    struct digest_ctx *digest_ctx =
576        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
577
578    if (md == NULL || digest_ctx == NULL)
579        return 0;
580    if (digest_op(digest_ctx, NULL, 0, md, COP_FLAG_FINAL) < 0) {
581        SYSerr(SYS_F_IOCTL, errno);
582        return 0;
583    }
584
585    return 1;
586}
587
588static int digest_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
589{
590    struct digest_ctx *digest_from =
591        (struct digest_ctx *)EVP_MD_CTX_md_data(from);
592    struct digest_ctx *digest_to =
593        (struct digest_ctx *)EVP_MD_CTX_md_data(to);
594    struct cphash_op cphash;
595
596    if (digest_from == NULL || digest_from->init_called != 1)
597        return 1;
598
599    if (!digest_init(to)) {
600        SYSerr(SYS_F_IOCTL, errno);
601        return 0;
602    }
603
604    cphash.src_ses = digest_from->sess.ses;
605    cphash.dst_ses = digest_to->sess.ses;
606    if (ioctl(cfd, CIOCCPHASH, &cphash) < 0) {
607        SYSerr(SYS_F_IOCTL, errno);
608        return 0;
609    }
610    return 1;
611}
612
613static int digest_cleanup(EVP_MD_CTX *ctx)
614{
615    struct digest_ctx *digest_ctx =
616        (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
617
618    if (digest_ctx == NULL)
619        return 1;
620
621    return clean_devcrypto_session(&digest_ctx->sess);
622}
623
624static int devcrypto_test_digest(size_t digest_data_index)
625{
626    struct session_op sess1, sess2;
627    struct cphash_op cphash;
628    int ret=0;
629
630    memset(&sess1, 0, sizeof(sess1));
631    memset(&sess2, 0, sizeof(sess2));
632    sess1.mac = digest_data[digest_data_index].devcryptoid;
633    if (ioctl(cfd, CIOCGSESSION, &sess1) < 0)
634        return 0;
635    /* Make sure the driver is capable of hash state copy */
636    sess2.mac = sess1.mac;
637    if (ioctl(cfd, CIOCGSESSION, &sess2) >= 0) {
638        cphash.src_ses = sess1.ses;
639        cphash.dst_ses = sess2.ses;
640        if (ioctl(cfd, CIOCCPHASH, &cphash) >= 0)
641            ret = 1;
642        ioctl(cfd, CIOCFSESSION, &sess2.ses);
643    }
644    ioctl(cfd, CIOCFSESSION, &sess1.ses);
645    return ret;
646}
647
/*
 * Registry of working digest methods.  known_digest_methods[] is indexed
 * exactly like digest_data[] above; known_digest_nids[] is a compacted
 * list holding only the NIDs that probed successfully, so it is NOT
 * necessarily indexed the same way.
 */
static int known_digest_nids[OSSL_NELEM(digest_data)];
static int known_digest_nids_amount = -1; /* -1 indicates not yet initialised */
static EVP_MD *known_digest_methods[OSSL_NELEM(digest_data)] = { NULL, };
656
/*
 * Probe the kernel for every entry in digest_data[] and build an EVP_MD
 * method for each algorithm that actually works, recording the usable
 * NIDs in known_digest_nids[].  Called once at engine load.
 */
static void prepare_digest_methods(void)
{
    size_t i;

    for (i = 0, known_digest_nids_amount = 0; i < OSSL_NELEM(digest_data);
         i++) {

        /*
         * Check that the algo is usable
         */
        if (!devcrypto_test_digest(i))
            continue;

        if ((known_digest_methods[i] = EVP_MD_meth_new(digest_data[i].nid,
                                                       NID_undef)) == NULL
            || !EVP_MD_meth_set_input_blocksize(known_digest_methods[i],
                                                digest_data[i].blocksize)
            || !EVP_MD_meth_set_result_size(known_digest_methods[i],
                                            digest_data[i].digestlen)
            || !EVP_MD_meth_set_init(known_digest_methods[i], digest_init)
            || !EVP_MD_meth_set_update(known_digest_methods[i], digest_update)
            || !EVP_MD_meth_set_final(known_digest_methods[i], digest_final)
            || !EVP_MD_meth_set_copy(known_digest_methods[i], digest_copy)
            || !EVP_MD_meth_set_cleanup(known_digest_methods[i], digest_cleanup)
            || !EVP_MD_meth_set_app_datasize(known_digest_methods[i],
                                             sizeof(struct digest_ctx))) {
            /* Building the method failed part-way; discard it entirely. */
            EVP_MD_meth_free(known_digest_methods[i]);
            known_digest_methods[i] = NULL;
        } else {
            known_digest_nids[known_digest_nids_amount++] = digest_data[i].nid;
        }
    }
}
690
691static const EVP_MD *get_digest_method(int nid)
692{
693    size_t i = get_digest_data_index(nid);
694
695    if (i == (size_t)-1)
696        return NULL;
697    return known_digest_methods[i];
698}
699
/*
 * Report the list of NIDs for which a digest method was registered.
 * Returns the count (-1 until prepare_digest_methods() has run).
 */
static int get_digest_nids(const int **nids)
{
    *nids = known_digest_nids;
    return known_digest_nids_amount;
}
705
706static void destroy_digest_method(int nid)
707{
708    size_t i = get_digest_data_index(nid);
709
710    EVP_MD_meth_free(known_digest_methods[i]);
711    known_digest_methods[i] = NULL;
712}
713
714static void destroy_all_digest_methods(void)
715{
716    size_t i;
717
718    for (i = 0; i < OSSL_NELEM(digest_data); i++)
719        destroy_digest_method(digest_data[i].nid);
720}
721
722static int devcrypto_digests(ENGINE *e, const EVP_MD **digest,
723                             const int **nids, int nid)
724{
725    if (digest == NULL)
726        return get_digest_nids(nids);
727
728    *digest = get_digest_method(nid);
729
730    return *digest != NULL;
731}
732
733#endif
734
735/******************************************************************************
736 *
737 * LOAD / UNLOAD
738 *
739 *****/
740
/*
 * ENGINE destroy callback: free all registered EVP methods and close the
 * global /dev/crypto file descriptor.  Always reports success.
 */
static int devcrypto_unload(ENGINE *e)
{
    destroy_all_cipher_methods();
#ifdef IMPLEMENT_DIGEST
    destroy_all_digest_methods();
#endif

    close(cfd);

    return 1;
}
752/*
753 * This engine is always built into libcrypto, so it doesn't offer any
754 * ability to be dynamically loadable.
755 */
756void engine_load_devcrypto_int()
757{
758    ENGINE *e = NULL;
759    int fd;
760
761    if ((fd = open("/dev/crypto", O_RDWR, 0)) < 0) {
762            fprintf(stderr, "Could not open /dev/crypto: %s\n", strerror(errno));
763        return;
764    }
765
766#ifdef CRIOGET
767    if (ioctl(fd, CRIOGET, &cfd) < 0) {
768        fprintf(stderr, "Could not create crypto fd: %s\n", strerror(errno));
769        close(fd);
770        cfd = -1;
771        return;
772    }
773    close(fd);
774#else
775    cfd = fd;
776#endif
777
778    if ((e = ENGINE_new()) == NULL
779        || !ENGINE_set_destroy_function(e, devcrypto_unload)) {
780        ENGINE_free(e);
781        /*
782         * We know that devcrypto_unload() won't be called when one of the
783         * above two calls have failed, so we close cfd explicitly here to
784         * avoid leaking resources.
785         */
786        close(cfd);
787        return;
788    }
789
790    prepare_cipher_methods();
791#ifdef IMPLEMENT_DIGEST
792    prepare_digest_methods();
793#endif
794
795    if (!ENGINE_set_id(e, "devcrypto")
796        || !ENGINE_set_name(e, "/dev/crypto engine")
797
798/*
799 * Asymmetric ciphers aren't well supported with /dev/crypto.  Among the BSD
800 * implementations, it seems to only exist in FreeBSD, and regarding the
801 * parameters in its crypt_kop, the manual crypto(4) has this to say:
802 *
803 *    The semantics of these arguments are currently undocumented.
804 *
805 * Reading through the FreeBSD source code doesn't give much more than
806 * their CRK_MOD_EXP implementation for ubsec.
807 *
808 * It doesn't look much better with cryptodev-linux.  They have the crypt_kop
809 * structure as well as the command (CRK_*) in cryptodev.h, but no support
810 * seems to be implemented at all for the moment.
811 *
812 * At the time of writing, it seems impossible to write proper support for
813 * FreeBSD's asym features without some very deep knowledge and access to
814 * specific kernel modules.
815 *
816 * /Richard Levitte, 2017-05-11
817 */
818#if 0
819# ifndef OPENSSL_NO_RSA
820        || !ENGINE_set_RSA(e, devcrypto_rsa)
821# endif
822# ifndef OPENSSL_NO_DSA
823        || !ENGINE_set_DSA(e, devcrypto_dsa)
824# endif
825# ifndef OPENSSL_NO_DH
826        || !ENGINE_set_DH(e, devcrypto_dh)
827# endif
828# ifndef OPENSSL_NO_EC
829        || !ENGINE_set_EC(e, devcrypto_ec)
830# endif
831#endif
832        || !ENGINE_set_ciphers(e, devcrypto_ciphers)
833#ifdef IMPLEMENT_DIGEST
834        || !ENGINE_set_digests(e, devcrypto_digests)
835#endif
836        ) {
837        ENGINE_free(e);
838        return;
839    }
840
841    ENGINE_add(e);
842    ENGINE_free(e);          /* Loose our local reference */
843    ERR_clear_error();
844}
845