1/* cipher.c  -	cipher dispatcher
2 * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
3 *               2005, 2007, 2008, 2009 Free Software Foundation, Inc.
4 *
5 * This file is part of Libgcrypt.
6 *
7 * Libgcrypt is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser general Public License as
9 * published by the Free Software Foundation; either version 2.1 of
10 * the License, or (at your option) any later version.
11 *
12 * Libgcrypt is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <config.h>
22#include <stdio.h>
23#include <stdlib.h>
24#include <string.h>
25#include <errno.h>
26
27#include "g10lib.h"
28#include "cipher.h"
29#include "ath.h"
30
/* Largest block size (in bytes) of any cipher in the table below;
   the IV/CTR buffers in the handle are dimensioned with this.  */
#define MAX_BLOCKSIZE 16
#define TABLE_SIZE 14
/* Magic values stored in the handle so that gcry_cipher_close can
   detect an invalid or already closed handle and whether the handle
   was allocated in secure memory.  */
#define CTX_MAGIC_NORMAL 0x24091964
#define CTX_MAGIC_SECURE 0x46919042

/* Try to use 16 byte aligned cipher context for better performance.
   We use the aligned attribute, thus it is only possible to implement
   this with gcc.  */
#undef NEED_16BYTE_ALIGNED_CONTEXT
#if defined (__GNUC__)
# define NEED_16BYTE_ALIGNED_CONTEXT 1
#endif

/* A dummy extraspec so that we do not need to test the extraspec
   field from the module specification against NULL and instead
   directly test the respective fields of extraspecs.  */
static cipher_extra_spec_t dummy_extra_spec;
48
/* This is the list of the default ciphers, which are included in
   libgcrypt.  Entries without an explicit FIPS_ALLOWED value get 0
   from the aggregate initializer and are thus skipped in fips mode
   by cipher_register_default.  */
static struct cipher_table_entry
{
  gcry_cipher_spec_t *cipher;      /* Basic cipher description.  */
  cipher_extra_spec_t *extraspec;  /* Extra ops or dummy_extra_spec.  */
  unsigned int algorithm;          /* The GCRY_CIPHER_* identifier.  */
  int fips_allowed;                /* Non-zero if usable in fips mode.  */
} cipher_table[] =
  {
#if USE_BLOWFISH
    { &_gcry_cipher_spec_blowfish,
      &dummy_extra_spec,                  GCRY_CIPHER_BLOWFISH },
#endif
#if USE_DES
    { &_gcry_cipher_spec_des,
      &dummy_extra_spec,                  GCRY_CIPHER_DES },
    { &_gcry_cipher_spec_tripledes,
      &_gcry_cipher_extraspec_tripledes,  GCRY_CIPHER_3DES, 1 },
#endif
#if USE_ARCFOUR
    { &_gcry_cipher_spec_arcfour,
      &dummy_extra_spec,                  GCRY_CIPHER_ARCFOUR },
#endif
#if USE_CAST5
    { &_gcry_cipher_spec_cast5,
      &dummy_extra_spec,                  GCRY_CIPHER_CAST5 },
#endif
#if USE_AES
    { &_gcry_cipher_spec_aes,
      &_gcry_cipher_extraspec_aes,        GCRY_CIPHER_AES,    1 },
    { &_gcry_cipher_spec_aes192,
      &_gcry_cipher_extraspec_aes192,     GCRY_CIPHER_AES192, 1 },
    { &_gcry_cipher_spec_aes256,
      &_gcry_cipher_extraspec_aes256,     GCRY_CIPHER_AES256, 1 },
#endif
#if USE_TWOFISH
    { &_gcry_cipher_spec_twofish,
      &dummy_extra_spec,                  GCRY_CIPHER_TWOFISH },
    { &_gcry_cipher_spec_twofish128,
      &dummy_extra_spec,                  GCRY_CIPHER_TWOFISH128 },
#endif
#if USE_SERPENT
    { &_gcry_cipher_spec_serpent128,
      &dummy_extra_spec,                  GCRY_CIPHER_SERPENT128 },
    { &_gcry_cipher_spec_serpent192,
      &dummy_extra_spec,                  GCRY_CIPHER_SERPENT192 },
    { &_gcry_cipher_spec_serpent256,
      &dummy_extra_spec,                  GCRY_CIPHER_SERPENT256 },
#endif
#if USE_RFC2268
    { &_gcry_cipher_spec_rfc2268_40,
      &dummy_extra_spec,                  GCRY_CIPHER_RFC2268_40 },
#endif
#if USE_SEED
    { &_gcry_cipher_spec_seed,
      &dummy_extra_spec,                  GCRY_CIPHER_SEED },
#endif
#if USE_CAMELLIA
    { &_gcry_cipher_spec_camellia128,
      &dummy_extra_spec,                  GCRY_CIPHER_CAMELLIA128 },
    { &_gcry_cipher_spec_camellia192,
      &dummy_extra_spec,                  GCRY_CIPHER_CAMELLIA192 },
    { &_gcry_cipher_spec_camellia256,
      &dummy_extra_spec,                  GCRY_CIPHER_CAMELLIA256 },
#endif
    { NULL                    }   /* End-of-table sentinel.  */
  };
117
/* List of registered ciphers.  Holds both the defaults from
   CIPHER_TABLE and modules added via _gcry_cipher_register.  */
static gcry_module_t ciphers_registered;

/* This is the lock protecting CIPHERS_REGISTERED.  */
static ath_mutex_t ciphers_registered_lock = ATH_MUTEX_INITIALIZER;

/* Flag to check whether the default ciphers have already been
   registered.  Protected by CIPHERS_REGISTERED_LOCK.  */
static int default_ciphers_registered;

/* Convenient macro for registering the default ciphers.  Safe to
   invoke any number of times; only the first call does the work.  */
#define REGISTER_DEFAULT_CIPHERS                   \
  do                                               \
    {                                              \
      ath_mutex_lock (&ciphers_registered_lock);   \
      if (! default_ciphers_registered)            \
        {                                          \
          cipher_register_default ();              \
          default_ciphers_registered = 1;          \
        }                                          \
      ath_mutex_unlock (&ciphers_registered_lock); \
    }                                              \
  while (0)
141
142
/* A VIA processor with the Padlock engine as well as the Intel AES_NI
   instructions require an alignment of most data on a 16 byte
   boundary.  Because we trick out the compiler while allocating the
   context, the align attribute as used in rijndael.c does not work on
   its own.  Thus we need to make sure that the entire context
   structure is aligned on that boundary.  We achieve this by
   defining a new type and use that instead of our usual alignment
   type.  */
typedef union
{
  PROPERLY_ALIGNED_TYPE foo;
#ifdef NEED_16BYTE_ALIGNED_CONTEXT
  char bar[16] __attribute__ ((aligned (16)));
#endif
  char c[1];
} cipher_context_alignment_t;
159
160
/* The handle structure.  */
struct gcry_cipher_handle
{
  int magic;                     /* CTX_MAGIC_NORMAL or CTX_MAGIC_SECURE;
                                    cleared on close to detect reuse.  */
  size_t actual_handle_size;     /* Allocated size of this handle. */
  size_t handle_offset;          /* Offset to the malloced block.  */
  gcry_cipher_spec_t *cipher;
  cipher_extra_spec_t *extraspec;
  gcry_module_t module;

  /* The algorithm id.  This is a hack required because the module
     interface does not easily allow to retrieve this value. */
  int algo;

  /* A structure with function pointers for bulk operations.  Due to
     limitations of the module system (we don't want to change the
     API) we need to keep these function pointers here.  The cipher
     open function intializes them and the actual encryption routines
     use them if they are not NULL.  */
  struct {
    void (*cfb_enc)(void *context, unsigned char *iv,
                    void *outbuf_arg, const void *inbuf_arg,
                    unsigned int nblocks);
    void (*cfb_dec)(void *context, unsigned char *iv,
                    void *outbuf_arg, const void *inbuf_arg,
                    unsigned int nblocks);
    void (*cbc_enc)(void *context, unsigned char *iv,
                    void *outbuf_arg, const void *inbuf_arg,
                    unsigned int nblocks, int cbc_mac);
    void (*cbc_dec)(void *context, unsigned char *iv,
                    void *outbuf_arg, const void *inbuf_arg,
                    unsigned int nblocks);
    void (*ctr_enc)(void *context, unsigned char *iv,
                    void *outbuf_arg, const void *inbuf_arg,
                    unsigned int nblocks);
  } bulk;


  int mode;            /* The GCRY_CIPHER_MODE_* requested at open time.  */
  unsigned int flags;  /* The GCRY_CIPHER_* flags requested at open time.  */

  struct {
    unsigned int key:1; /* Set to 1 if a key has been set.  */
    unsigned int iv:1;  /* Set to 1 if a IV has been set.  */
  } marks;

  /* The initialization vector.  For best performance we make sure
     that it is properly aligned.  In particular some implementations
     of bulk operations expect an 16 byte aligned IV.  */
  union {
    cipher_context_alignment_t iv_align;
    unsigned char iv[MAX_BLOCKSIZE];
  } u_iv;

  /* The counter for CTR mode.  This field is also used by AESWRAP and
     thus we can't use the U_IV union.  */
  union {
    cipher_context_alignment_t iv_align;
    unsigned char ctr[MAX_BLOCKSIZE];
  } u_ctr;

  /* Space to save an IV or CTR for chaining operations.  */
  unsigned char lastiv[MAX_BLOCKSIZE];
  int unused;  /* Number of unused bytes in LASTIV. */

  /* What follows are two contexts of the cipher in use.  The first
     one needs to be aligned well enough for the cipher operation
     whereas the second one is a copy created by cipher_setkey and
     used by cipher_reset.  That second copy has no need for proper
     aligment because it is only accessed by memcpy.  */
  cipher_context_alignment_t context;
};
233
234
235
/* These dummy functions are used in case a cipher implementation
   refuses to provide it's own functions.  Installing them (see
   cipher_register_default) lets the dispatcher call through the spec
   unconditionally; gcry_cipher_open rejects modes for which only a
   dummy is available.  */

/* Dummy setkey: accepts any key and reports success.  */
static gcry_err_code_t
dummy_setkey (void *c, const unsigned char *key, unsigned int keylen)
{
  (void)c;
  (void)key;
  (void)keylen;
  return GPG_ERR_NO_ERROR;
}

/* Dummy block encryption: must never be reached; abort if it is.  */
static void
dummy_encrypt_block (void *c,
		     unsigned char *outbuf, const unsigned char *inbuf)
{
  (void)c;
  (void)outbuf;
  (void)inbuf;
  BUG();
}

/* Dummy block decryption: must never be reached; abort if it is.  */
static void
dummy_decrypt_block (void *c,
		     unsigned char *outbuf, const unsigned char *inbuf)
{
  (void)c;
  (void)outbuf;
  (void)inbuf;
  BUG();
}

/* Dummy stream encryption: must never be reached; abort if it is.  */
static void
dummy_encrypt_stream (void *c,
		      unsigned char *outbuf, const unsigned char *inbuf,
		      unsigned int n)
{
  (void)c;
  (void)outbuf;
  (void)inbuf;
  (void)n;
  BUG();
}

/* Dummy stream decryption: must never be reached; abort if it is.  */
static void
dummy_decrypt_stream (void *c,
		      unsigned char *outbuf, const unsigned char *inbuf,
		      unsigned int n)
{
  (void)c;
  (void)outbuf;
  (void)inbuf;
  (void)n;
  BUG();
}
291
292
/* Internal function.  Register all the ciphers included in
   CIPHER_TABLE.  Note, that this function gets only used by the macro
   REGISTER_DEFAULT_CIPHERS which protects it using a mutex. */
static void
cipher_register_default (void)
{
  gcry_err_code_t err = GPG_ERR_NO_ERROR;
  int i;

  for (i = 0; !err && cipher_table[i].cipher; i++)
    {
      /* Fill in dummy handlers for any operations the spec does not
         provide, so callers never have to test for NULL pointers.
         Note that this mutates the (global) spec structures.  */
      if (! cipher_table[i].cipher->setkey)
	cipher_table[i].cipher->setkey = dummy_setkey;
      if (! cipher_table[i].cipher->encrypt)
	cipher_table[i].cipher->encrypt = dummy_encrypt_block;
      if (! cipher_table[i].cipher->decrypt)
	cipher_table[i].cipher->decrypt = dummy_decrypt_block;
      if (! cipher_table[i].cipher->stencrypt)
	cipher_table[i].cipher->stencrypt = dummy_encrypt_stream;
      if (! cipher_table[i].cipher->stdecrypt)
	cipher_table[i].cipher->stdecrypt = dummy_decrypt_stream;

      /* In fips mode, only explicitly approved algorithms are
         registered.  */
      if ( fips_mode () && !cipher_table[i].fips_allowed )
        continue;

      err = _gcry_module_add (&ciphers_registered,
			      cipher_table[i].algorithm,
			      (void *) cipher_table[i].cipher,
			      (void *) cipher_table[i].extraspec,
			      NULL);
    }

  /* Failing to register a built-in cipher is a fatal internal error.  */
  if (err)
    BUG ();
}
328
329/* Internal callback function.  Used via _gcry_module_lookup.  */
330static int
331gcry_cipher_lookup_func_name (void *spec, void *data)
332{
333  gcry_cipher_spec_t *cipher = (gcry_cipher_spec_t *) spec;
334  char *name = (char *) data;
335  const char **aliases = cipher->aliases;
336  int i, ret = ! stricmp (name, cipher->name);
337
338  if (aliases)
339    for (i = 0; aliases[i] && (! ret); i++)
340      ret = ! stricmp (name, aliases[i]);
341
342  return ret;
343}
344
345/* Internal callback function.  Used via _gcry_module_lookup.  */
346static int
347gcry_cipher_lookup_func_oid (void *spec, void *data)
348{
349  gcry_cipher_spec_t *cipher = (gcry_cipher_spec_t *) spec;
350  char *oid = (char *) data;
351  gcry_cipher_oid_spec_t *oid_specs = cipher->oids;
352  int ret = 0, i;
353
354  if (oid_specs)
355    for (i = 0; oid_specs[i].oid && (! ret); i++)
356      if (! stricmp (oid, oid_specs[i].oid))
357	ret = 1;
358
359  return ret;
360}
361
362/* Internal function.  Lookup a cipher entry by it's name.  */
363static gcry_module_t
364gcry_cipher_lookup_name (const char *name)
365{
366  gcry_module_t cipher;
367
368  cipher = _gcry_module_lookup (ciphers_registered, (void *) name,
369				gcry_cipher_lookup_func_name);
370
371  return cipher;
372}
373
374/* Internal function.  Lookup a cipher entry by it's oid.  */
375static gcry_module_t
376gcry_cipher_lookup_oid (const char *oid)
377{
378  gcry_module_t cipher;
379
380  cipher = _gcry_module_lookup (ciphers_registered, (void *) oid,
381				gcry_cipher_lookup_func_oid);
382
383  return cipher;
384}
385
386/* Register a new cipher module whose specification can be found in
387   CIPHER.  On success, a new algorithm ID is stored in ALGORITHM_ID
388   and a pointer representhing this module is stored in MODULE.  */
389gcry_error_t
390_gcry_cipher_register (gcry_cipher_spec_t *cipher,
391                       cipher_extra_spec_t *extraspec,
392                       int *algorithm_id,
393                       gcry_module_t *module)
394{
395  gcry_err_code_t err = 0;
396  gcry_module_t mod;
397
398  /* We do not support module loading in fips mode.  */
399  if (fips_mode ())
400    return gpg_error (GPG_ERR_NOT_SUPPORTED);
401
402  ath_mutex_lock (&ciphers_registered_lock);
403  err = _gcry_module_add (&ciphers_registered, 0,
404			  (void *)cipher,
405			  (void *)(extraspec? extraspec : &dummy_extra_spec),
406                          &mod);
407  ath_mutex_unlock (&ciphers_registered_lock);
408
409  if (! err)
410    {
411      *module = mod;
412      *algorithm_id = mod->mod_id;
413    }
414
415  return gcry_error (err);
416}
417
/* Unregister the cipher identified by MODULE, which must have been
   registered with gcry_cipher_register.  This merely drops one
   reference; the module is destroyed once its count reaches zero.  */
void
gcry_cipher_unregister (gcry_module_t module)
{
  ath_mutex_lock (&ciphers_registered_lock);
  _gcry_module_release (module);
  ath_mutex_unlock (&ciphers_registered_lock);
}
427
428/* Locate the OID in the oid table and return the index or -1 when not
429   found.  An opitonal "oid." or "OID." prefix in OID is ignored, the
430   OID is expected to be in standard IETF dotted notation.  The
431   internal algorithm number is returned in ALGORITHM unless it
432   ispassed as NULL.  A pointer to the specification of the module
433   implementing this algorithm is return in OID_SPEC unless passed as
434   NULL.*/
435static int
436search_oid (const char *oid, int *algorithm, gcry_cipher_oid_spec_t *oid_spec)
437{
438  gcry_module_t module;
439  int ret = 0;
440
441  if (oid && ((! strncmp (oid, "oid.", 4))
442	      || (! strncmp (oid, "OID.", 4))))
443    oid += 4;
444
445  module = gcry_cipher_lookup_oid (oid);
446  if (module)
447    {
448      gcry_cipher_spec_t *cipher = module->spec;
449      int i;
450
451      for (i = 0; cipher->oids[i].oid && !ret; i++)
452	if (! stricmp (oid, cipher->oids[i].oid))
453	  {
454	    if (algorithm)
455	      *algorithm = module->mod_id;
456	    if (oid_spec)
457	      *oid_spec = cipher->oids[i];
458	    ret = 1;
459	  }
460      _gcry_module_release (module);
461    }
462
463  return ret;
464}
465
466/* Map STRING to the cipher algorithm identifier.  Returns the
467   algorithm ID of the cipher for the given name or 0 if the name is
468   not known.  It is valid to pass NULL for STRING which results in a
469   return value of 0. */
470int
471gcry_cipher_map_name (const char *string)
472{
473  gcry_module_t cipher;
474  int ret, algorithm = 0;
475
476  if (! string)
477    return 0;
478
479  REGISTER_DEFAULT_CIPHERS;
480
481  /* If the string starts with a digit (optionally prefixed with
482     either "OID." or "oid."), we first look into our table of ASN.1
483     object identifiers to figure out the algorithm */
484
485  ath_mutex_lock (&ciphers_registered_lock);
486
487  ret = search_oid (string, &algorithm, NULL);
488  if (! ret)
489    {
490      cipher = gcry_cipher_lookup_name (string);
491      if (cipher)
492	{
493	  algorithm = cipher->mod_id;
494	  _gcry_module_release (cipher);
495	}
496    }
497
498  ath_mutex_unlock (&ciphers_registered_lock);
499
500  return algorithm;
501}
502
503
504/* Given a STRING with an OID in dotted decimal notation, this
505   function returns the cipher mode (GCRY_CIPHER_MODE_*) associated
506   with that OID or 0 if no mode is known.  Passing NULL for string
507   yields a return value of 0. */
508int
509gcry_cipher_mode_from_oid (const char *string)
510{
511  gcry_cipher_oid_spec_t oid_spec;
512  int ret = 0, mode = 0;
513
514  if (!string)
515    return 0;
516
517  ath_mutex_lock (&ciphers_registered_lock);
518  ret = search_oid (string, NULL, &oid_spec);
519  if (ret)
520    mode = oid_spec.mode;
521  ath_mutex_unlock (&ciphers_registered_lock);
522
523  return mode;
524}
525
526
527/* Map the cipher algorithm whose ID is contained in ALGORITHM to a
528   string representation of the algorithm name.  For unknown algorithm
529   IDs this function returns "?".  */
530static const char *
531cipher_algo_to_string (int algorithm)
532{
533  gcry_module_t cipher;
534  const char *name;
535
536  REGISTER_DEFAULT_CIPHERS;
537
538  ath_mutex_lock (&ciphers_registered_lock);
539  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
540  if (cipher)
541    {
542      name = ((gcry_cipher_spec_t *) cipher->spec)->name;
543      _gcry_module_release (cipher);
544    }
545  else
546    name = "?";
547  ath_mutex_unlock (&ciphers_registered_lock);
548
549  return name;
550}
551
/* Map the cipher algorithm identifier ALGORITHM to a string
   representing this algorithm.  This string is the default name as
   used by Libgcrypt.  A pointer to the static string "?" is returned
   for an unknown algorithm.  NULL is never returned. */
const char *
gcry_cipher_algo_name (int algorithm)
{
  return cipher_algo_to_string (algorithm);
}
561
562
563/* Flag the cipher algorithm with the identifier ALGORITHM as
564   disabled.  There is no error return, the function does nothing for
565   unknown algorithms.  Disabled algorithms are vitually not available
566   in Libgcrypt. */
567static void
568disable_cipher_algo (int algorithm)
569{
570  gcry_module_t cipher;
571
572  REGISTER_DEFAULT_CIPHERS;
573
574  ath_mutex_lock (&ciphers_registered_lock);
575  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
576  if (cipher)
577    {
578      if (! (cipher->flags & FLAG_MODULE_DISABLED))
579	cipher->flags |= FLAG_MODULE_DISABLED;
580      _gcry_module_release (cipher);
581    }
582  ath_mutex_unlock (&ciphers_registered_lock);
583}
584
585
586/* Return 0 if the cipher algorithm with identifier ALGORITHM is
587   available. Returns a basic error code value if it is not
588   available.  */
589static gcry_err_code_t
590check_cipher_algo (int algorithm)
591{
592  gcry_err_code_t err = GPG_ERR_NO_ERROR;
593  gcry_module_t cipher;
594
595  REGISTER_DEFAULT_CIPHERS;
596
597  ath_mutex_lock (&ciphers_registered_lock);
598  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
599  if (cipher)
600    {
601      if (cipher->flags & FLAG_MODULE_DISABLED)
602	err = GPG_ERR_CIPHER_ALGO;
603      _gcry_module_release (cipher);
604    }
605  else
606    err = GPG_ERR_CIPHER_ALGO;
607  ath_mutex_unlock (&ciphers_registered_lock);
608
609  return err;
610}
611
612
613/* Return the standard length of the key for the cipher algorithm with
614   the identifier ALGORITHM.  This function expects a valid algorithm
615   and will abort if the algorithm is not available or the length of
616   the key is not known. */
617static unsigned int
618cipher_get_keylen (int algorithm)
619{
620  gcry_module_t cipher;
621  unsigned len = 0;
622
623  REGISTER_DEFAULT_CIPHERS;
624
625  ath_mutex_lock (&ciphers_registered_lock);
626  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
627  if (cipher)
628    {
629      len = ((gcry_cipher_spec_t *) cipher->spec)->keylen;
630      if (!len)
631	log_bug ("cipher %d w/o key length\n", algorithm);
632      _gcry_module_release (cipher);
633    }
634  else
635    log_bug ("cipher %d not found\n", algorithm);
636  ath_mutex_unlock (&ciphers_registered_lock);
637
638  return len;
639}
640
641/* Return the block length of the cipher algorithm with the identifier
642   ALGORITHM.  This function expects a valid algorithm and will abort
643   if the algorithm is not available or the length of the key is not
644   known. */
645static unsigned int
646cipher_get_blocksize (int algorithm)
647{
648  gcry_module_t cipher;
649  unsigned len = 0;
650
651  REGISTER_DEFAULT_CIPHERS;
652
653  ath_mutex_lock (&ciphers_registered_lock);
654  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
655  if (cipher)
656    {
657      len = ((gcry_cipher_spec_t *) cipher->spec)->blocksize;
658      if (! len)
659	  log_bug ("cipher %d w/o blocksize\n", algorithm);
660      _gcry_module_release (cipher);
661    }
662  else
663    log_bug ("cipher %d not found\n", algorithm);
664  ath_mutex_unlock (&ciphers_registered_lock);
665
666  return len;
667}
668
669
670/*
671   Open a cipher handle for use with cipher algorithm ALGORITHM, using
672   the cipher mode MODE (one of the GCRY_CIPHER_MODE_*) and return a
673   handle in HANDLE.  Put NULL into HANDLE and return an error code if
674   something goes wrong.  FLAGS may be used to modify the
675   operation.  The defined flags are:
676
677   GCRY_CIPHER_SECURE:  allocate all internal buffers in secure memory.
678   GCRY_CIPHER_ENABLE_SYNC:  Enable the sync operation as used in OpenPGP.
679   GCRY_CIPHER_CBC_CTS:  Enable CTS mode.
680   GCRY_CIPHER_CBC_MAC:  Enable MAC mode.
681
682   Values for these flags may be combined using OR.
683 */
684gcry_error_t
685gcry_cipher_open (gcry_cipher_hd_t *handle,
686		  int algo, int mode, unsigned int flags)
687{
688  int secure = (flags & GCRY_CIPHER_SECURE);
689  gcry_cipher_spec_t *cipher = NULL;
690  cipher_extra_spec_t *extraspec = NULL;
691  gcry_module_t module = NULL;
692  gcry_cipher_hd_t h = NULL;
693  gcry_err_code_t err = 0;
694
695  /* If the application missed to call the random poll function, we do
696     it here to ensure that it is used once in a while. */
697  _gcry_fast_random_poll ();
698
699  REGISTER_DEFAULT_CIPHERS;
700
701  /* Fetch the according module and check whether the cipher is marked
702     available for use.  */
703  ath_mutex_lock (&ciphers_registered_lock);
704  module = _gcry_module_lookup_id (ciphers_registered, algo);
705  if (module)
706    {
707      /* Found module.  */
708
709      if (module->flags & FLAG_MODULE_DISABLED)
710	{
711	  /* Not available for use.  */
712	  err = GPG_ERR_CIPHER_ALGO;
713	}
714      else
715        {
716          cipher = (gcry_cipher_spec_t *) module->spec;
717          extraspec = module->extraspec;
718        }
719    }
720  else
721    err = GPG_ERR_CIPHER_ALGO;
722  ath_mutex_unlock (&ciphers_registered_lock);
723
724  /* check flags */
725  if ((! err)
726      && ((flags & ~(0
727		     | GCRY_CIPHER_SECURE
728		     | GCRY_CIPHER_ENABLE_SYNC
729		     | GCRY_CIPHER_CBC_CTS
730		     | GCRY_CIPHER_CBC_MAC))
731	  || (flags & GCRY_CIPHER_CBC_CTS & GCRY_CIPHER_CBC_MAC)))
732    err = GPG_ERR_CIPHER_ALGO;
733
734  /* check that a valid mode has been requested */
735  if (! err)
736    switch (mode)
737      {
738      case GCRY_CIPHER_MODE_ECB:
739      case GCRY_CIPHER_MODE_CBC:
740      case GCRY_CIPHER_MODE_CFB:
741      case GCRY_CIPHER_MODE_OFB:
742      case GCRY_CIPHER_MODE_CTR:
743      case GCRY_CIPHER_MODE_AESWRAP:
744	if ((cipher->encrypt == dummy_encrypt_block)
745	    || (cipher->decrypt == dummy_decrypt_block))
746	  err = GPG_ERR_INV_CIPHER_MODE;
747	break;
748
749      case GCRY_CIPHER_MODE_STREAM:
750	if ((cipher->stencrypt == dummy_encrypt_stream)
751	    || (cipher->stdecrypt == dummy_decrypt_stream))
752	  err = GPG_ERR_INV_CIPHER_MODE;
753	break;
754
755      case GCRY_CIPHER_MODE_NONE:
756        /* This mode may be used for debugging.  It copies the main
757           text verbatim to the ciphertext.  We do not allow this in
758           fips mode or if no debug flag has been set.  */
759	if (fips_mode () || !_gcry_get_debug_flag (0))
760          err = GPG_ERR_INV_CIPHER_MODE;
761	break;
762
763      default:
764	err = GPG_ERR_INV_CIPHER_MODE;
765      }
766
767  /* Perform selftest here and mark this with a flag in cipher_table?
768     No, we should not do this as it takes too long.  Further it does
769     not make sense to exclude algorithms with failing selftests at
770     runtime: If a selftest fails there is something seriously wrong
771     with the system and thus we better die immediately. */
772
773  if (! err)
774    {
775      size_t size = (sizeof (*h)
776                     + 2 * cipher->contextsize
777                     - sizeof (cipher_context_alignment_t)
778#ifdef NEED_16BYTE_ALIGNED_CONTEXT
779                     + 15  /* Space for leading alignment gap.  */
780#endif /*NEED_16BYTE_ALIGNED_CONTEXT*/
781                     );
782
783      if (secure)
784	h = gcry_calloc_secure (1, size);
785      else
786	h = gcry_calloc (1, size);
787
788      if (! h)
789	err = gpg_err_code_from_syserror ();
790      else
791	{
792          size_t off = 0;
793
794#ifdef NEED_16BYTE_ALIGNED_CONTEXT
795          if ( ((unsigned long)h & 0x0f) )
796            {
797              /* The malloced block is not aligned on a 16 byte
798                 boundary.  Correct for this.  */
799              off = 16 - ((unsigned long)h & 0x0f);
800              h = (void*)((char*)h + off);
801            }
802#endif /*NEED_16BYTE_ALIGNED_CONTEXT*/
803
804	  h->magic = secure ? CTX_MAGIC_SECURE : CTX_MAGIC_NORMAL;
805          h->actual_handle_size = size - off;
806          h->handle_offset = off;
807	  h->cipher = cipher;
808	  h->extraspec = extraspec;
809	  h->module = module;
810          h->algo = algo;
811	  h->mode = mode;
812	  h->flags = flags;
813
814          /* Setup bulk encryption routines.  */
815          switch (algo)
816            {
817#ifdef USE_AES
818            case GCRY_CIPHER_AES128:
819            case GCRY_CIPHER_AES192:
820            case GCRY_CIPHER_AES256:
821              h->bulk.cfb_enc = _gcry_aes_cfb_enc;
822              h->bulk.cfb_dec = _gcry_aes_cfb_dec;
823              h->bulk.cbc_enc = _gcry_aes_cbc_enc;
824              h->bulk.cbc_dec = _gcry_aes_cbc_dec;
825              h->bulk.ctr_enc = _gcry_aes_ctr_enc;
826              break;
827#endif /*USE_AES*/
828
829            default:
830              break;
831            }
832	}
833    }
834
835  /* Done.  */
836
837  if (err)
838    {
839      if (module)
840	{
841	  /* Release module.  */
842	  ath_mutex_lock (&ciphers_registered_lock);
843	  _gcry_module_release (module);
844	  ath_mutex_unlock (&ciphers_registered_lock);
845	}
846    }
847
848  *handle = err ? NULL : h;
849
850  return gcry_error (err);
851}
852
853
/* Release all resources associated with the cipher handle H. H may be
   NULL in which case this is a no-operation. */
void
gcry_cipher_close (gcry_cipher_hd_t h)
{
  size_t off;

  if (!h)
    return;

  /* Reject handles that were never opened or are closed already.
     Clearing MAGIC afterwards turns a double close into a fatal
     error instead of a silent double free.  */
  if ((h->magic != CTX_MAGIC_SECURE)
      && (h->magic != CTX_MAGIC_NORMAL))
    _gcry_fatal_error(GPG_ERR_INTERNAL,
		      "gcry_cipher_close: already closed/invalid handle");
  else
    h->magic = 0;

  /* Release module.  */
  ath_mutex_lock (&ciphers_registered_lock);
  _gcry_module_release (h->module);
  ath_mutex_unlock (&ciphers_registered_lock);

  /* We always want to wipe out the memory even when the context has
     been allocated in secure memory.  The user might have disabled
     secure memory or is using his own implementation which does not
     do the wiping.  To accomplish this we need to keep track of the
     actual size of this structure because we have no way to known
     how large the allocated area was when using a standard malloc. */
  off = h->handle_offset;
  wipememory (h, h->actual_handle_size);

  /* Free the original malloced block, i.e. undo the alignment offset
     that gcry_cipher_open may have applied.  */
  gcry_free ((char*)h - off);
}
887
888
889/* Set the key to be used for the encryption context C to KEY with
890   length KEYLEN.  The length should match the required length. */
891static gcry_error_t
892cipher_setkey (gcry_cipher_hd_t c, byte *key, unsigned int keylen)
893{
894  gcry_err_code_t ret;
895
896  ret = (*c->cipher->setkey) (&c->context.c, key, keylen);
897  if (!ret)
898    {
899      /* Duplicate initial context.  */
900      memcpy ((void *) ((char *) &c->context.c + c->cipher->contextsize),
901              (void *) &c->context.c,
902              c->cipher->contextsize);
903      c->marks.key = 1;
904    }
905  else
906    c->marks.key = 0;
907
908  return gcry_error (ret);
909}
910
911
912/* Set the IV to be used for the encryption context C to IV with
913   length IVLEN.  The length should match the required length. */
914static void
915cipher_setiv( gcry_cipher_hd_t c, const byte *iv, unsigned ivlen )
916{
917  memset (c->u_iv.iv, 0, c->cipher->blocksize);
918  if (iv)
919    {
920      if (ivlen != c->cipher->blocksize)
921        {
922          log_info ("WARNING: cipher_setiv: ivlen=%u blklen=%u\n",
923                    ivlen, (unsigned int)c->cipher->blocksize);
924          fips_signal_error ("IV length does not match blocklength");
925        }
926      if (ivlen > c->cipher->blocksize)
927        ivlen = c->cipher->blocksize;
928      memcpy (c->u_iv.iv, iv, ivlen);
929      c->marks.iv = 1;
930    }
931  else
932      c->marks.iv = 0;
933  c->unused = 0;
934}
935
936
937/* Reset the cipher context to the initial context.  This is basically
938   the same as an release followed by a new. */
939static void
940cipher_reset (gcry_cipher_hd_t c)
941{
942  memcpy (&c->context.c,
943	  (char *) &c->context.c + c->cipher->contextsize,
944	  c->cipher->contextsize);
945  memset (&c->marks, 0, sizeof c->marks);
946  memset (c->u_iv.iv, 0, c->cipher->blocksize);
947  memset (c->lastiv, 0, c->cipher->blocksize);
948  memset (c->u_ctr.ctr, 0, c->cipher->blocksize);
949}
950
951
952
953static gcry_err_code_t
954do_ecb_encrypt (gcry_cipher_hd_t c,
955                unsigned char *outbuf, unsigned int outbuflen,
956                const unsigned char *inbuf, unsigned int inbuflen)
957{
958  unsigned int blocksize = c->cipher->blocksize;
959  unsigned int n, nblocks;
960
961  if (outbuflen < inbuflen)
962    return GPG_ERR_BUFFER_TOO_SHORT;
963  if ((inbuflen % blocksize))
964    return GPG_ERR_INV_LENGTH;
965
966  nblocks = inbuflen / c->cipher->blocksize;
967
968  for (n=0; n < nblocks; n++ )
969    {
970      c->cipher->encrypt (&c->context.c, outbuf, (byte*)/*arggg*/inbuf);
971      inbuf  += blocksize;
972      outbuf += blocksize;
973    }
974  return 0;
975}
976
977static gcry_err_code_t
978do_ecb_decrypt (gcry_cipher_hd_t c,
979                unsigned char *outbuf, unsigned int outbuflen,
980                const unsigned char *inbuf, unsigned int inbuflen)
981{
982  unsigned int blocksize = c->cipher->blocksize;
983  unsigned int n, nblocks;
984
985  if (outbuflen < inbuflen)
986    return GPG_ERR_BUFFER_TOO_SHORT;
987  if ((inbuflen % blocksize))
988    return GPG_ERR_INV_LENGTH;
989  nblocks = inbuflen / c->cipher->blocksize;
990
991  for (n=0; n < nblocks; n++ )
992    {
993      c->cipher->decrypt (&c->context.c, outbuf, (byte*)/*arggg*/inbuf );
994      inbuf  += blocksize;
995      outbuf += blocksize;
996    }
997
998  return 0;
999}
1000
1001
/* Encrypt INBUF to OUTBUF in CBC mode, optionally with ciphertext
   stealing (GCRY_CIPHER_CBC_CTS) or in CBC-MAC mode
   (GCRY_CIPHER_CBC_MAC).  OUTBUF and INBUF may be the same buffer.
   Returns 0 on success or an error code.  */
static gcry_err_code_t
do_cbc_encrypt (gcry_cipher_hd_t c,
                unsigned char *outbuf, unsigned int outbuflen,
                const unsigned char *inbuf, unsigned int inbuflen)
{
  unsigned int n;
  unsigned char *ivp;
  int i;
  size_t blocksize = c->cipher->blocksize;
  unsigned nblocks = inbuflen / blocksize;

  /* In CBC-MAC mode only the final block is stored, so one block of
     output space suffices.  */
  if (outbuflen < ((c->flags & GCRY_CIPHER_CBC_MAC)? blocksize : inbuflen))
    return GPG_ERR_BUFFER_TOO_SHORT;

  /* A partial final block is only allowed with ciphertext stealing
     and only if there is more than one block of input.  */
  if ((inbuflen % c->cipher->blocksize)
      && !(inbuflen > c->cipher->blocksize
           && (c->flags & GCRY_CIPHER_CBC_CTS)))
    return GPG_ERR_INV_LENGTH;

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
    {
      /* With CTS and a block-aligned input the final full block must
         be kept for the CTS step below; an unaligned tail is already
         excluded by the integer division above.  */
      if ((inbuflen % blocksize) == 0)
	nblocks--;
    }

  if (c->bulk.cbc_enc)
    {
      /* Use the algorithm's optimized bulk function.  */
      c->bulk.cbc_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks,
                       (c->flags & GCRY_CIPHER_CBC_MAC));
      inbuf  += nblocks * blocksize;
      if (!(c->flags & GCRY_CIPHER_CBC_MAC))
        outbuf += nblocks * blocksize;
    }
  else
    {
      /* Generic CBC: XOR the plaintext with the IV, encrypt, and use
         the ciphertext as next IV.  In CBC-MAC mode OUTBUF is not
         advanced, so each block overwrites the previous one.  */
      for (n=0; n < nblocks; n++ )
        {
          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
            outbuf[i] = inbuf[i] ^ *ivp++;
          c->cipher->encrypt ( &c->context.c, outbuf, outbuf );
          memcpy (c->u_iv.iv, outbuf, blocksize );
          inbuf  += blocksize;
          if (!(c->flags & GCRY_CIPHER_CBC_MAC))
            outbuf += blocksize;
        }
    }

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
    {
      /* Ciphertext stealing for the last two blocks.  */
      /* We have to be careful here, since outbuf might be equal to
         inbuf.  */
      int restbytes;
      unsigned char b;

      if ((inbuflen % blocksize) == 0)
        restbytes = blocksize;
      else
        restbytes = inbuflen % blocksize;

      outbuf -= blocksize;   /* Step back onto the last written block.  */
      for (ivp = c->u_iv.iv, i = 0; i < restbytes; i++)
        {
          b = inbuf[i];
          outbuf[blocksize + i] = outbuf[i]; /* Move head of Cn-1 to Cn.  */
          outbuf[i] = b ^ *ivp++;            /* XOR Pn into new Cn-1.  */
        }
      for (; i < blocksize; i++)
        outbuf[i] = 0 ^ *ivp++;              /* Zero padding of short tail.  */

      c->cipher->encrypt (&c->context.c, outbuf, outbuf);
      memcpy (c->u_iv.iv, outbuf, blocksize);
    }

  return 0;
}
1077
1078
/* Decrypt INBUF to OUTBUF in CBC mode, optionally with ciphertext
   stealing (GCRY_CIPHER_CBC_CTS).  OUTBUF and INBUF may be the same
   buffer.  Returns 0 on success or an error code.  */
static gcry_err_code_t
do_cbc_decrypt (gcry_cipher_hd_t c,
                unsigned char *outbuf, unsigned int outbuflen,
                const unsigned char *inbuf, unsigned int inbuflen)
{
  unsigned int n;
  unsigned char *ivp;
  int i;
  size_t blocksize = c->cipher->blocksize;
  unsigned int nblocks = inbuflen / blocksize;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  /* A partial final block is only allowed with ciphertext stealing
     and only if there is more than one block of input.  */
  if ((inbuflen % c->cipher->blocksize)
      && !(inbuflen > c->cipher->blocksize
           && (c->flags & GCRY_CIPHER_CBC_CTS)))
    return GPG_ERR_INV_LENGTH;

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
    {
      /* Keep the last one or two blocks for the CTS step below.  */
      nblocks--;
      if ((inbuflen % blocksize) == 0)
	nblocks--;
      memcpy (c->lastiv, c->u_iv.iv, blocksize);
    }

  if (c->bulk.cbc_dec)
    {
      /* Use the algorithm's optimized bulk function.  */
      c->bulk.cbc_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      inbuf  += nblocks * blocksize;
      outbuf += nblocks * blocksize;
    }
  else
    {
      for (n=0; n < nblocks; n++ )
        {
          /* Because outbuf and inbuf might be the same, we have to
           * save the original ciphertext block.  We use LASTIV for
           * this here because it is not used otherwise. */
          memcpy (c->lastiv, inbuf, blocksize);
          c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
	    outbuf[i] ^= *ivp++;
          memcpy(c->u_iv.iv, c->lastiv, blocksize );
          inbuf  += c->cipher->blocksize;
          outbuf += c->cipher->blocksize;
        }
    }

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
    {
      /* Ciphertext stealing: undo the swap of the last two blocks
         done by the encryption side.  */
      int restbytes;

      if ((inbuflen % blocksize) == 0)
        restbytes = blocksize;
      else
        restbytes = inbuflen % blocksize;

      memcpy (c->lastiv, c->u_iv.iv, blocksize );         /* Save Cn-2. */
      memcpy (c->u_iv.iv, inbuf + blocksize, restbytes ); /* Save Cn. */

      c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
      for (ivp=c->u_iv.iv,i=0; i < restbytes; i++ )
        outbuf[i] ^= *ivp++;

      /* Move the recovered tail into place and complete the stolen
         block with the kept ciphertext bytes.  */
      memcpy(outbuf + blocksize, outbuf, restbytes);
      for(i=restbytes; i < blocksize; i++)
        c->u_iv.iv[i] = outbuf[i];
      c->cipher->decrypt (&c->context.c, outbuf, c->u_iv.iv);
      for(ivp=c->lastiv,i=0; i < blocksize; i++ )
        outbuf[i] ^= *ivp++;
      /* c->lastiv is now really lastlastiv, does this matter? */
    }

  return 0;
}
1156
1157
/* Encrypt INBUF to OUTBUF in CFB mode.  C->unused counts keystream
   bytes in the tail of C->u_iv.iv not yet consumed, so arbitrary
   INBUFLEN values work across calls.  In CFB the produced ciphertext
   is fed back into the IV (the "*ivp++ ^= ..." stores it there).  */
static gcry_err_code_t
do_cfb_encrypt (gcry_cipher_hd_t c,
                unsigned char *outbuf, unsigned int outbuflen,
                const unsigned char *inbuf, unsigned int inbuflen)
{
  unsigned char *ivp;
  size_t blocksize = c->cipher->blocksize;
  size_t blocksize_x_2 = blocksize + blocksize;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  if ( inbuflen <= c->unused )
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV and store input into IV. */
      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
           inbuflen;
           inbuflen--, c->unused-- )
        *outbuf++ = (*ivp++ ^= *inbuf++);
      return 0;
    }

  if ( c->unused )
    {
      /* XOR the input with the IV and store input into IV */
      inbuflen -= c->unused;
      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
        *outbuf++ = (*ivp++ ^= *inbuf++);
    }

  /* Now we can process complete blocks.  We use a loop as long as we
     have at least 2 blocks and use conditions for the rest.  This
     also allows to use a bulk encryption function if available.  */
  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_enc)
    {
      unsigned int nblocks = inbuflen / blocksize;
      c->bulk.cfb_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      outbuf += nblocks * blocksize;
      inbuf  += nblocks * blocksize;
      inbuflen -= nblocks * blocksize;
    }
  else
    {
      while ( inbuflen >= blocksize_x_2 )
        {
          int i;
          /* Encrypt the IV. */
          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
          /* XOR the input with the IV and store input into IV.  */
          for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
            *outbuf++ = (*ivp++ ^= *inbuf++);
          inbuflen -= blocksize;
        }
    }

  /* One full block may remain (the loop above stops at two).  */
  if ( inbuflen >= blocksize )
    {
      int i;
      /* Save the current IV and then encrypt the IV. */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      /* XOR the input with the IV and store input into IV */
      for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
        *outbuf++ = (*ivp++ ^= *inbuf++);
      inbuflen -= blocksize;
    }
  if ( inbuflen )
    {
      /* A partial block remains; the rest of the fresh keystream is
         kept in the IV buffer and accounted for by c->unused.  */
      /* Save the current IV and then encrypt the IV. */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      c->unused = blocksize;
      /* Apply the XOR. */
      c->unused -= inbuflen;
      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
        *outbuf++ = (*ivp++ ^= *inbuf++);
    }
  return 0;
}
1238
1239
/* Decrypt INBUF to OUTBUF in CFB mode.  C->unused counts keystream
   bytes in the tail of C->u_iv.iv not yet consumed.  Because CFB
   feeds the ciphertext back into the IV, each input byte is saved in
   TEMP first so that OUTBUF may be the same buffer as INBUF.  */
static gcry_err_code_t
do_cfb_decrypt (gcry_cipher_hd_t c,
                unsigned char *outbuf, unsigned int outbuflen,
                const unsigned char *inbuf, unsigned int inbuflen)
{
  unsigned char *ivp;
  unsigned long temp;
  int i;
  size_t blocksize = c->cipher->blocksize;
  size_t blocksize_x_2 = blocksize + blocksize;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  if (inbuflen <= c->unused)
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV and store input into IV. */
      for (ivp=c->u_iv.iv+blocksize - c->unused;
           inbuflen;
           inbuflen--, c->unused--)
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
      return 0;
    }

  if (c->unused)
    {
      /* XOR the input with the IV and store input into IV. */
      inbuflen -= c->unused;
      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
    }

  /* Now we can process complete blocks.  We use a loop as long as we
     have at least 2 blocks and use conditions for the rest.  This
     also allows to use a bulk encryption function if available.  */
  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_dec)
    {
      unsigned int nblocks = inbuflen / blocksize;
      c->bulk.cfb_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      outbuf += nblocks * blocksize;
      inbuf  += nblocks * blocksize;
      inbuflen -= nblocks * blocksize;
    }
  else
    {
      while (inbuflen >= blocksize_x_2 )
        {
          /* Encrypt the IV. */
          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
          /* XOR the input with the IV and store input into IV. */
          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
            {
              temp = *inbuf++;
              *outbuf++ = *ivp ^ temp;
              *ivp++ = temp;
            }
          inbuflen -= blocksize;
        }
    }

  /* One full block may remain (the loop above stops at two).  */
  if (inbuflen >= blocksize )
    {
      /* Save the current IV and then encrypt the IV. */
      memcpy ( c->lastiv, c->u_iv.iv, blocksize);
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      /* XOR the input with the IV and store input into IV */
      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
      inbuflen -= blocksize;
    }

  if (inbuflen)
    {
      /* A partial block remains; leftover keystream stays in the IV
         buffer, accounted for by c->unused.  */
      /* Save the current IV and then encrypt the IV. */
      memcpy ( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      c->unused = blocksize;
      /* Apply the XOR. */
      c->unused -= inbuflen;
      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
    }
  return 0;
}
1341
1342
/* Encrypt INBUF to OUTBUF in OFB mode.  The keystream is produced by
   repeatedly encrypting the IV and does not depend on the data;
   C->unused counts keystream bytes in the tail of C->u_iv.iv that
   are still available for the next call.  */
static gcry_err_code_t
do_ofb_encrypt (gcry_cipher_hd_t c,
                unsigned char *outbuf, unsigned int outbuflen,
                const unsigned char *inbuf, unsigned int inbuflen)
{
  unsigned char *ivp;
  size_t blocksize = c->cipher->blocksize;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  if ( inbuflen <= c->unused )
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV */
      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
           inbuflen;
           inbuflen--, c->unused-- )
        *outbuf++ = (*ivp++ ^ *inbuf++);
      return 0;
    }

  if( c->unused )
    {
      /* Consume the remaining keystream bytes first.  */
      inbuflen -= c->unused;
      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
        *outbuf++ = (*ivp++ ^ *inbuf++);
    }

  /* Now we can process complete blocks. */
  while ( inbuflen >= blocksize )
    {
      int i;
      /* Encrypt the IV (and save the current one). */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );

      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
        *outbuf++ = (*ivp++ ^ *inbuf++);
      inbuflen -= blocksize;
    }
  if ( inbuflen )
    { /* process the remaining bytes */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      c->unused = blocksize;
      c->unused -= inbuflen;
      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
        *outbuf++ = (*ivp++ ^ *inbuf++);
    }
  return 0;
}
1395
1396static gcry_err_code_t
1397do_ofb_decrypt (gcry_cipher_hd_t c,
1398                unsigned char *outbuf, unsigned int outbuflen,
1399                const unsigned char *inbuf, unsigned int inbuflen)
1400{
1401  unsigned char *ivp;
1402  size_t blocksize = c->cipher->blocksize;
1403
1404  if (outbuflen < inbuflen)
1405    return GPG_ERR_BUFFER_TOO_SHORT;
1406
1407  if( inbuflen <= c->unused )
1408    {
1409      /* Short enough to be encoded by the remaining XOR mask. */
1410      for (ivp=c->u_iv.iv+blocksize - c->unused; inbuflen; inbuflen--,c->unused--)
1411        *outbuf++ = *ivp++ ^ *inbuf++;
1412      return 0;
1413    }
1414
1415  if ( c->unused )
1416    {
1417      inbuflen -= c->unused;
1418      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
1419        *outbuf++ = *ivp++ ^ *inbuf++;
1420    }
1421
1422  /* Now we can process complete blocks. */
1423  while ( inbuflen >= blocksize )
1424    {
1425      int i;
1426      /* Encrypt the IV (and save the current one). */
1427      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1428      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1429      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1430        *outbuf++ = *ivp++ ^ *inbuf++;
1431      inbuflen -= blocksize;
1432    }
1433  if ( inbuflen )
1434    { /* Process the remaining bytes. */
1435      /* Encrypt the IV (and save the current one). */
1436      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1437      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1438      c->unused = blocksize;
1439      c->unused -= inbuflen;
1440      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
1441        *outbuf++ = *ivp++ ^ *inbuf++;
1442    }
1443  return 0;
1444}
1445
1446
/* Encrypt INBUF to OUTBUF in CTR mode.  The counter C->u_ctr.ctr is
   incremented per block as a big-endian integer.  Left-over keystream
   bytes are kept in C->lastiv with C->unused holding their count, so
   the stream continues correctly across calls of arbitrary length.
   Decryption uses this very same function.  */
static gcry_err_code_t
do_ctr_encrypt (gcry_cipher_hd_t c,
                unsigned char *outbuf, unsigned int outbuflen,
                const unsigned char *inbuf, unsigned int inbuflen)
{
  unsigned int n;
  int i;
  unsigned int blocksize = c->cipher->blocksize;
  unsigned int nblocks;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;

  /* First process a left over encrypted counter.  */
  if (c->unused)
    {
      gcry_assert (c->unused < blocksize);
      i = blocksize - c->unused;  /* Offset of the first unused byte.  */
      for (n=0; c->unused && n < inbuflen; c->unused--, n++, i++)
        {
          /* XOR input with encrypted counter and store in output.  */
          outbuf[n] = inbuf[n] ^ c->lastiv[i];
        }
      inbuf  += n;
      outbuf += n;
      inbuflen -= n;
    }


  /* Use a bulk method if available.  */
  nblocks = inbuflen / blocksize;
  if (nblocks && c->bulk.ctr_enc)
    {
      c->bulk.ctr_enc (&c->context.c, c->u_ctr.ctr, outbuf, inbuf, nblocks);
      inbuf  += nblocks * blocksize;
      outbuf += nblocks * blocksize;
      inbuflen -= nblocks * blocksize;
    }

  /* If we don't have a bulk method use the standard method.  We also
     use this method for a remaining partial block.  */
  if (inbuflen)
    {
      unsigned char tmp[MAX_BLOCKSIZE];

      for (n=0; n < inbuflen; n++)
        {
          if ((n % blocksize) == 0)
            {
              /* Encrypt the counter into TMP, then increment the
                 counter as a big-endian integer with carry.  */
              c->cipher->encrypt (&c->context.c, tmp, c->u_ctr.ctr);

              for (i = blocksize; i > 0; i--)
                {
                  c->u_ctr.ctr[i-1]++;
                  if (c->u_ctr.ctr[i-1] != 0)
                    break;
                }
            }

          /* XOR input with encrypted counter and store in output.  */
          outbuf[n] = inbuf[n] ^ tmp[n % blocksize];
        }

      /* Save the unused bytes of the counter.  */
      n %= blocksize;
      c->unused = (blocksize - n) % blocksize;
      if (c->unused)
        memcpy (c->lastiv+n, tmp+n, c->unused);

      /* Don't leave keystream material on the stack.  */
      wipememory (tmp, sizeof tmp);
    }

  return 0;
}
1521
/* Decrypt INBUF to OUTBUF in CTR mode.  CTR is an XOR keystream mode,
   thus decryption is identical to encryption.  */
static gcry_err_code_t
do_ctr_decrypt (gcry_cipher_hd_t c,
                unsigned char *outbuf, unsigned int outbuflen,
                const unsigned char *inbuf, unsigned int inbuflen)
{
  return do_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
}
1529
1530
1531/* Perform the AES-Wrap algorithm as specified by RFC3394.  We
1532   implement this as a mode usable with any cipher algorithm of
1533   blocksize 128.  */
/* Wrap the INBUFLEN bytes of INBUF per RFC 3394, writing INBUFLEN+8
   bytes to OUTBUF.  Requires a cipher with a 128 bit block size.  If
   an IV has been set it is used as the Alternative Initial Value,
   otherwise the standard value 0xa6a6... is used.  */
static gcry_err_code_t
do_aeswrap_encrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
                    const byte *inbuf, unsigned int inbuflen )
{
  int j, x;
  unsigned int n, i;
  unsigned char *r, *a, *b;
  unsigned char t[8];

#if MAX_BLOCKSIZE < 8
#error Invalid block size
#endif
  /* We require a cipher with a 128 bit block length.  */
  if (c->cipher->blocksize != 16)
    return GPG_ERR_INV_LENGTH;

  /* The output buffer must be able to hold the input data plus one
     additional block.  */
  if (outbuflen < inbuflen + 8)
    return GPG_ERR_BUFFER_TOO_SHORT;
  /* Input data must be multiple of 64 bits.  */
  if (inbuflen % 8)
    return GPG_ERR_INV_ARG;

  n = inbuflen / 8;

  /* We need at least two 64 bit blocks.  */
  if (n < 2)
    return GPG_ERR_INV_ARG;

  r = outbuf;
  a = outbuf;  /* We store A directly in OUTBUF.  */
  b = c->u_ctr.ctr;  /* B is also used to concatenate stuff.  */

  /* If an IV has been set we use that IV as the Alternative Initial
     Value; if it has not been set we use the standard value.  */
  if (c->marks.iv)
    memcpy (a, c->u_iv.iv, 8);
  else
    memset (a, 0xa6, 8);

  /* Copy the inbuf to the outbuf. */
  memmove (r+8, inbuf, inbuflen);

  memset (t, 0, sizeof t); /* t := 0.  */

  /* Six wrapping rounds over all N 64 bit quantities.  */
  for (j = 0; j <= 5; j++)
    {
      for (i = 1; i <= n; i++)
        {
          /* B := AES_k( A | R[i] ) */
          memcpy (b, a, 8);
          memcpy (b+8, r+i*8, 8);
          c->cipher->encrypt (&c->context.c, b, b);
          /* t := t + 1  (big-endian increment with carry)  */
	  for (x = 7; x >= 0; x--)
	    {
	      t[x]++;
	      if (t[x])
		break;
	    }
          /* A := MSB_64(B) ^ t */
          for (x=0; x < 8; x++)
            a[x] = b[x] ^ t[x];
          /* R[i] := LSB_64(B) */
          memcpy (r+i*8, b+8, 8);
        }
   }

  return 0;
}
1605
1606/* Perform the AES-Unwrap algorithm as specified by RFC3394.  We
1607   implement this as a mode usable with any cipher algorithm of
1608   blocksize 128.  */
/* Unwrap the INBUFLEN bytes of INBUF per RFC 3394, writing
   INBUFLEN-8 bytes to OUTBUF.  Requires a cipher with a 128 bit
   block size.  Returns GPG_ERR_CHECKSUM if the recovered A does not
   match the (alternative) initial value.  */
static gcry_err_code_t
do_aeswrap_decrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
                    const byte *inbuf, unsigned int inbuflen)
{
  int j, x;
  unsigned int n, i;
  unsigned char *r, *a, *b;
  unsigned char t[8];

#if MAX_BLOCKSIZE < 8
#error Invalid block size
#endif
  /* We require a cipher with a 128 bit block length.  */
  if (c->cipher->blocksize != 16)
    return GPG_ERR_INV_LENGTH;

  /* The output buffer must be able to hold the input data minus one
     additional block.  Fixme: The caller has more restrictive checks
     - we may want to fix them for this mode.  */
  if (outbuflen + 8  < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;
  /* Input data must be multiple of 64 bits.  */
  if (inbuflen % 8)
    return GPG_ERR_INV_ARG;

  n = inbuflen / 8;

  /* We need at least three 64 bit blocks.  */
  if (n < 3)
    return GPG_ERR_INV_ARG;

  r = outbuf;
  a = c->lastiv;  /* We use c->LASTIV as buffer for A.  */
  b = c->u_ctr.ctr;     /* B is also used to concatenate stuff.  */

  /* Copy the inbuf to the outbuf and save A. */
  memcpy (a, inbuf, 8);
  memmove (r, inbuf+8, inbuflen-8);
  n--; /* Reduce to actual number of data blocks.  */

  /* t := 6 * n  (stored big-endian in T)  */
  i = n * 6;  /* The range is valid because: n = inbuflen / 8 - 1.  */
  for (x=0; x < 8 && x < sizeof (i); x++)
    t[7-x] = i >> (8*x);
  for (; x < 8; x++)
    t[7-x] = 0;

  /* Six unwrapping rounds over all N 64 bit quantities, running the
     wrap steps in reverse order.  */
  for (j = 5; j >= 0; j--)
    {
      for (i = n; i >= 1; i--)
        {
          /* B := AES_k^1( (A ^ t)| R[i] ) */
          for (x = 0; x < 8; x++)
            b[x] = a[x] ^ t[x];
          memcpy (b+8, r+(i-1)*8, 8);
          c->cipher->decrypt (&c->context.c, b, b);
          /* t := t - 1  (big-endian decrement with borrow)  */
	  for (x = 7; x >= 0; x--)
	    {
	      t[x]--;
	      if (t[x] != 0xff)
		break;
	    }
          /* A := MSB_64(B) */
          memcpy (a, b, 8);
          /* R[i] := LSB_64(B) */
          memcpy (r+(i-1)*8, b+8, 8);
        }
   }

  /* If an IV has been set we compare against this Alternative Initial
     Value; if it has not been set we compare against the standard IV.  */
  if (c->marks.iv)
    j = memcmp (a, c->u_iv.iv, 8);
  else
    {
      for (j=0, x=0; x < 8; x++)
        if (a[x] != 0xa6)
          {
            j=1;
            break;
          }
    }
  return j? GPG_ERR_CHECKSUM : 0;
}
1694
1695
1696/****************
1697 * Encrypt INBUF to OUTBUF with the mode selected at open.
1698 * inbuf and outbuf may overlap or be the same.
1699 * Depending on the mode some constraints apply to INBUFLEN.
1700 */
1701static gcry_err_code_t
1702cipher_encrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
1703		const byte *inbuf, unsigned int inbuflen)
1704{
1705  gcry_err_code_t rc;
1706
1707  switch (c->mode)
1708    {
1709    case GCRY_CIPHER_MODE_ECB:
1710      rc = do_ecb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1711      break;
1712
1713    case GCRY_CIPHER_MODE_CBC:
1714      rc = do_cbc_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1715      break;
1716
1717    case GCRY_CIPHER_MODE_CFB:
1718      rc = do_cfb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1719      break;
1720
1721    case GCRY_CIPHER_MODE_OFB:
1722      rc = do_ofb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1723      break;
1724
1725    case GCRY_CIPHER_MODE_CTR:
1726      rc = do_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1727      break;
1728
1729    case GCRY_CIPHER_MODE_AESWRAP:
1730      rc = do_aeswrap_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1731      break;
1732
1733    case GCRY_CIPHER_MODE_STREAM:
1734      c->cipher->stencrypt (&c->context.c,
1735                            outbuf, (byte*)/*arggg*/inbuf, inbuflen);
1736      rc = 0;
1737      break;
1738
1739    case GCRY_CIPHER_MODE_NONE:
1740      if (fips_mode () || !_gcry_get_debug_flag (0))
1741        {
1742          fips_signal_error ("cipher mode NONE used");
1743          rc = GPG_ERR_INV_CIPHER_MODE;
1744        }
1745      else
1746        {
1747          if (inbuf != outbuf)
1748            memmove (outbuf, inbuf, inbuflen);
1749          rc = 0;
1750        }
1751      break;
1752
1753    default:
1754      log_fatal ("cipher_encrypt: invalid mode %d\n", c->mode );
1755      rc = GPG_ERR_INV_CIPHER_MODE;
1756      break;
1757    }
1758
1759  return rc;
1760}
1761
1762
1763/****************
1764 * Encrypt IN and write it to OUT.  If IN is NULL, in-place encryption has
1765 * been requested.
1766 */
1767gcry_error_t
1768gcry_cipher_encrypt (gcry_cipher_hd_t h, void *out, size_t outsize,
1769                     const void *in, size_t inlen)
1770{
1771  gcry_err_code_t err;
1772
1773  if (!in)  /* Caller requested in-place encryption.  */
1774    err = cipher_encrypt (h, out, outsize, out, outsize);
1775  else
1776    err = cipher_encrypt (h, out, outsize, in, inlen);
1777
1778  /* Failsafe: Make sure that the plaintext will never make it into
1779     OUT if the encryption returned an error.  */
1780  if (err && out)
1781    memset (out, 0x42, outsize);
1782
1783  return gcry_error (err);
1784}
1785
1786
1787
1788/****************
1789 * Decrypt INBUF to OUTBUF with the mode selected at open.
1790 * inbuf and outbuf may overlap or be the same.
 * Depending on the mode some constraints apply to INBUFLEN.
1792 */
1793static gcry_err_code_t
1794cipher_decrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
1795                const byte *inbuf, unsigned int inbuflen)
1796{
1797  gcry_err_code_t rc;
1798
1799  switch (c->mode)
1800    {
1801    case GCRY_CIPHER_MODE_ECB:
1802      rc = do_ecb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1803      break;
1804
1805    case GCRY_CIPHER_MODE_CBC:
1806      rc = do_cbc_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1807      break;
1808
1809    case GCRY_CIPHER_MODE_CFB:
1810      rc = do_cfb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1811      break;
1812
1813    case GCRY_CIPHER_MODE_OFB:
1814      rc = do_ofb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1815      break;
1816
1817    case GCRY_CIPHER_MODE_CTR:
1818      rc = do_ctr_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1819      break;
1820
1821    case GCRY_CIPHER_MODE_AESWRAP:
1822      rc = do_aeswrap_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1823      break;
1824
1825    case GCRY_CIPHER_MODE_STREAM:
1826      c->cipher->stdecrypt (&c->context.c,
1827                            outbuf, (byte*)/*arggg*/inbuf, inbuflen);
1828      rc = 0;
1829      break;
1830
1831    case GCRY_CIPHER_MODE_NONE:
1832      if (fips_mode () || !_gcry_get_debug_flag (0))
1833        {
1834          fips_signal_error ("cipher mode NONE used");
1835          rc = GPG_ERR_INV_CIPHER_MODE;
1836        }
1837      else
1838        {
1839          if (inbuf != outbuf)
1840            memmove (outbuf, inbuf, inbuflen);
1841          rc = 0;
1842        }
1843      break;
1844
1845    default:
1846      log_fatal ("cipher_decrypt: invalid mode %d\n", c->mode );
1847      rc = GPG_ERR_INV_CIPHER_MODE;
1848      break;
1849    }
1850
1851  return rc;
1852}
1853
1854
1855gcry_error_t
1856gcry_cipher_decrypt (gcry_cipher_hd_t h, void *out, size_t outsize,
1857		     const void *in, size_t inlen)
1858{
1859  gcry_err_code_t err;
1860
1861  if (!in) /* Caller requested in-place encryption. */
1862    err = cipher_decrypt (h, out, outsize, out, outsize);
1863  else
1864    err = cipher_decrypt (h, out, outsize, in, inlen);
1865
1866  return gcry_error (err);
1867}
1868
1869
1870
1871/****************
1872 * Used for PGP's somewhat strange CFB mode. Only works if
1873 * the corresponding flag is set.
1874 */
1875static void
1876cipher_sync (gcry_cipher_hd_t c)
1877{
1878  if ((c->flags & GCRY_CIPHER_ENABLE_SYNC) && c->unused)
1879    {
1880      memmove (c->u_iv.iv + c->unused,
1881               c->u_iv.iv, c->cipher->blocksize - c->unused);
1882      memcpy (c->u_iv.iv,
1883              c->lastiv + c->cipher->blocksize - c->unused, c->unused);
1884      c->unused = 0;
1885    }
1886}
1887
1888
/* Set the key KEY of length KEYLEN for the handle HD.  Thin public
   wrapper around cipher_setkey; the cast merely drops constness.  */
gcry_error_t
_gcry_cipher_setkey (gcry_cipher_hd_t hd, const void *key, size_t keylen)
{
  return cipher_setkey (hd, (void*)key, keylen);
}
1894
1895
/* Set the IV of length IVLEN for the handle HD.  cipher_setiv has no
   error return, thus this always succeeds.  */
gcry_error_t
_gcry_cipher_setiv (gcry_cipher_hd_t hd, const void *iv, size_t ivlen)
{
  cipher_setiv (hd, iv, ivlen);
  return 0;
}
1902
1903/* Set counter for CTR mode.  (CTR,CTRLEN) must denote a buffer of
1904   block size length, or (NULL,0) to set the CTR to the all-zero
1905   block. */
1906gpg_error_t
1907_gcry_cipher_setctr (gcry_cipher_hd_t hd, const void *ctr, size_t ctrlen)
1908{
1909  if (ctr && ctrlen == hd->cipher->blocksize)
1910    {
1911      memcpy (hd->u_ctr.ctr, ctr, hd->cipher->blocksize);
1912      hd->unused = 0;
1913    }
1914  else if (!ctr || !ctrlen)
1915    {
1916      memset (hd->u_ctr.ctr, 0, hd->cipher->blocksize);
1917      hd->unused = 0;
1918    }
1919  else
1920    return gpg_error (GPG_ERR_INV_ARG);
1921  return 0;
1922}
1923
1924
1925gcry_error_t
1926gcry_cipher_ctl( gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen)
1927{
1928  gcry_err_code_t rc = GPG_ERR_NO_ERROR;
1929
1930  switch (cmd)
1931    {
1932    case GCRYCTL_SET_KEY:  /* Deprecated; use gcry_cipher_setkey.  */
1933      rc = cipher_setkey( h, buffer, buflen );
1934      break;
1935
1936    case GCRYCTL_SET_IV:   /* Deprecated; use gcry_cipher_setiv.  */
1937      cipher_setiv( h, buffer, buflen );
1938      break;
1939
1940    case GCRYCTL_RESET:
1941      cipher_reset (h);
1942      break;
1943
1944    case GCRYCTL_CFB_SYNC:
1945      cipher_sync( h );
1946      break;
1947
1948    case GCRYCTL_SET_CBC_CTS:
1949      if (buflen)
1950	if (h->flags & GCRY_CIPHER_CBC_MAC)
1951	  rc = GPG_ERR_INV_FLAG;
1952	else
1953	  h->flags |= GCRY_CIPHER_CBC_CTS;
1954      else
1955	h->flags &= ~GCRY_CIPHER_CBC_CTS;
1956      break;
1957
1958    case GCRYCTL_SET_CBC_MAC:
1959      if (buflen)
1960	if (h->flags & GCRY_CIPHER_CBC_CTS)
1961	  rc = GPG_ERR_INV_FLAG;
1962	else
1963	  h->flags |= GCRY_CIPHER_CBC_MAC;
1964      else
1965	h->flags &= ~GCRY_CIPHER_CBC_MAC;
1966      break;
1967
1968    case GCRYCTL_DISABLE_ALGO:
1969      /* This command expects NULL for H and BUFFER to point to an
1970         integer with the algo number.  */
1971      if( h || !buffer || buflen != sizeof(int) )
1972	return gcry_error (GPG_ERR_CIPHER_ALGO);
1973      disable_cipher_algo( *(int*)buffer );
1974      break;
1975
1976    case GCRYCTL_SET_CTR: /* Deprecated; use gcry_cipher_setctr.  */
1977      rc = gpg_err_code (_gcry_cipher_setctr (h, buffer, buflen));
1978      break;
1979
1980    case 61:  /* Disable weak key detection (private).  */
1981      if (h->extraspec->set_extra_info)
1982        rc = h->extraspec->set_extra_info
1983          (&h->context.c, CIPHER_INFO_NO_WEAK_KEY, NULL, 0);
1984      else
1985        rc = GPG_ERR_NOT_SUPPORTED;
1986      break;
1987
1988    case 62: /* Return current input vector (private).  */
1989      /* This is the input block as used in CFB and OFB mode which has
1990         initially been set as IV.  The returned format is:
1991           1 byte  Actual length of the block in bytes.
1992           n byte  The block.
1993         If the provided buffer is too short, an error is returned. */
1994      if (buflen < (1 + h->cipher->blocksize))
1995        rc = GPG_ERR_TOO_SHORT;
1996      else
1997        {
1998          unsigned char *ivp;
1999          unsigned char *dst = buffer;
2000          int n = h->unused;
2001
2002          if (!n)
2003            n = h->cipher->blocksize;
2004          gcry_assert (n <= h->cipher->blocksize);
2005          *dst++ = n;
2006          ivp = h->u_iv.iv + h->cipher->blocksize - n;
2007          while (n--)
2008            *dst++ = *ivp++;
2009        }
2010      break;
2011
2012    default:
2013      rc = GPG_ERR_INV_OP;
2014    }
2015
2016  return gcry_error (rc);
2017}
2018
2019
2020/* Return information about the cipher handle H.  CMD is the kind of
2021   information requested.  BUFFER and NBYTES are reserved for now.
2022
2023   There are no values for CMD yet defined.
2024
2025   The function always returns GPG_ERR_INV_OP.
2026
2027 */
2028gcry_error_t
2029gcry_cipher_info (gcry_cipher_hd_t h, int cmd, void *buffer, size_t *nbytes)
2030{
2031  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2032
2033  (void)h;
2034  (void)buffer;
2035  (void)nbytes;
2036
2037  switch (cmd)
2038    {
2039    default:
2040      err = GPG_ERR_INV_OP;
2041    }
2042
2043  return gcry_error (err);
2044}
2045
2046/* Return information about the given cipher algorithm ALGO.
2047
2048   WHAT select the kind of information returned:
2049
2050    GCRYCTL_GET_KEYLEN:
2051  	Return the length of the key.  If the algorithm ALGO
2052  	supports multiple key lengths, the maximum supported key length
2053  	is returned.  The key length is returned as number of octets.
2054  	BUFFER and NBYTES must be zero.
2055
2056    GCRYCTL_GET_BLKLEN:
2057  	Return the blocklength of the algorithm ALGO counted in octets.
2058  	BUFFER and NBYTES must be zero.
2059
2060    GCRYCTL_TEST_ALGO:
2061  	Returns 0 if the specified algorithm ALGO is available for use.
2062  	BUFFER and NBYTES must be zero.
2063
2064   Note: Because this function is in most cases used to return an
2065   integer value, we can make it easier for the caller to just look at
2066   the return value.  The caller will in all cases consult the value
2067   and thereby detecting whether a error occurred or not (i.e. while
2068   checking the block size)
2069 */
2070gcry_error_t
2071gcry_cipher_algo_info (int algo, int what, void *buffer, size_t *nbytes)
2072{
2073  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2074  unsigned int ui;
2075
2076  switch (what)
2077    {
2078    case GCRYCTL_GET_KEYLEN:
2079      if (buffer || (! nbytes))
2080	err = GPG_ERR_CIPHER_ALGO;
2081      else
2082	{
2083	  ui = cipher_get_keylen (algo);
2084	  if ((ui > 0) && (ui <= 512))
2085	    *nbytes = (size_t) ui / 8;
2086	  else
2087	    /* The only reason is an invalid algo or a strange
2088	       blocksize.  */
2089	    err = GPG_ERR_CIPHER_ALGO;
2090	}
2091      break;
2092
2093    case GCRYCTL_GET_BLKLEN:
2094      if (buffer || (! nbytes))
2095	err = GPG_ERR_CIPHER_ALGO;
2096      else
2097	{
2098	  ui = cipher_get_blocksize (algo);
2099	  if ((ui > 0) && (ui < 10000))
2100	    *nbytes = ui;
2101	  else
2102	    /* The only reason is an invalid algo or a strange
2103	       blocksize.  */
2104	    err = GPG_ERR_CIPHER_ALGO;
2105	}
2106      break;
2107
2108    case GCRYCTL_TEST_ALGO:
2109      if (buffer || nbytes)
2110	err = GPG_ERR_INV_ARG;
2111      else
2112	err = check_cipher_algo (algo);
2113      break;
2114
2115      default:
2116	err = GPG_ERR_INV_OP;
2117    }
2118
2119  return gcry_error (err);
2120}
2121
2122
2123/* This function returns length of the key for algorithm ALGO.  If the
2124   algorithm supports multiple key lengths, the maximum supported key
2125   length is returned.  On error 0 is returned.  The key length is
2126   returned as number of octets.
2127
2128   This is a convenience functions which should be preferred over
2129   gcry_cipher_algo_info because it allows for proper type
2130   checking.  */
2131size_t
2132gcry_cipher_get_algo_keylen (int algo)
2133{
2134  size_t n;
2135
2136  if (gcry_cipher_algo_info (algo, GCRYCTL_GET_KEYLEN, NULL, &n))
2137    n = 0;
2138  return n;
2139}
2140
2141/* This functions returns the blocklength of the algorithm ALGO
2142   counted in octets.  On error 0 is returned.
2143
2144   This is a convenience functions which should be preferred over
2145   gcry_cipher_algo_info because it allows for proper type
2146   checking.  */
2147size_t
2148gcry_cipher_get_algo_blklen (int algo)
2149{
2150  size_t n;
2151
2152  if (gcry_cipher_algo_info( algo, GCRYCTL_GET_BLKLEN, NULL, &n))
2153    n = 0;
2154  return n;
2155}
2156
2157/* Explicitly initialize this module.  */
2158gcry_err_code_t
2159_gcry_cipher_init (void)
2160{
2161  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2162
2163  REGISTER_DEFAULT_CIPHERS;
2164
2165  return err;
2166}
2167
2168/* Get a list consisting of the IDs of the loaded cipher modules.  If
2169   LIST is zero, write the number of loaded cipher modules to
2170   LIST_LENGTH and return.  If LIST is non-zero, the first
2171   *LIST_LENGTH algorithm IDs are stored in LIST, which must be of
2172   according size.  In case there are less cipher modules than
2173   *LIST_LENGTH, *LIST_LENGTH is updated to the correct number.  */
2174gcry_error_t
2175gcry_cipher_list (int *list, int *list_length)
2176{
2177  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2178
2179  ath_mutex_lock (&ciphers_registered_lock);
2180  err = _gcry_module_list (ciphers_registered, list, list_length);
2181  ath_mutex_unlock (&ciphers_registered_lock);
2182
2183  return err;
2184}
2185
2186
2187/* Run the selftests for cipher algorithm ALGO with optional reporting
2188   function REPORT.  */
2189gpg_error_t
2190_gcry_cipher_selftest (int algo, int extended, selftest_report_func_t report)
2191{
2192  gcry_module_t module = NULL;
2193  cipher_extra_spec_t *extraspec = NULL;
2194  gcry_err_code_t ec = 0;
2195
2196  REGISTER_DEFAULT_CIPHERS;
2197
2198  ath_mutex_lock (&ciphers_registered_lock);
2199  module = _gcry_module_lookup_id (ciphers_registered, algo);
2200  if (module && !(module->flags & FLAG_MODULE_DISABLED))
2201    extraspec = module->extraspec;
2202  ath_mutex_unlock (&ciphers_registered_lock);
2203  if (extraspec && extraspec->selftest)
2204    ec = extraspec->selftest (algo, extended, report);
2205  else
2206    {
2207      ec = GPG_ERR_CIPHER_ALGO;
2208      if (report)
2209        report ("cipher", algo, "module",
2210                module && !(module->flags & FLAG_MODULE_DISABLED)?
2211                "no selftest available" :
2212                module? "algorithm disabled" : "algorithm not found");
2213    }
2214
2215  if (module)
2216    {
2217      ath_mutex_lock (&ciphers_registered_lock);
2218      _gcry_module_release (module);
2219      ath_mutex_unlock (&ciphers_registered_lock);
2220    }
2221  return gpg_error (ec);
2222}
2223