1/* cipher.c  -	cipher dispatcher
2 * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
3 *               2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
4 *
5 * This file is part of Libgcrypt.
6 *
7 * Libgcrypt is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as
9 * published by the Free Software Foundation; either version 2.1 of
10 * the License, or (at your option) any later version.
11 *
12 * Libgcrypt is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <config.h>
22#include <stdio.h>
23#include <stdlib.h>
24#include <string.h>
25#include <errno.h>
26
27#include "g10lib.h"
28#include "cipher.h"
29#include "ath.h"
30
31#define MAX_BLOCKSIZE 16
32#define TABLE_SIZE 14
33#define CTX_MAGIC_NORMAL 0x24091964
34#define CTX_MAGIC_SECURE 0x46919042
35
36/* Try to use a 16 byte aligned cipher context for better performance.
37   We use the aligned attribute, thus it is only possible to implement
38   this with gcc.  */
39#undef NEED_16BYTE_ALIGNED_CONTEXT
40#if defined (__GNUC__)
41# define NEED_16BYTE_ALIGNED_CONTEXT 1
42#endif
43
44/* A dummy extraspec so that we do not need to test the extraspec
45   field from the module specification against NULL and instead
46   directly test the respective fields of extraspecs.  */
47static cipher_extra_spec_t dummy_extra_spec;
48
49/* This is the list of the default ciphers, which are included in
50   libgcrypt.  */
51static struct cipher_table_entry
52{
53  gcry_cipher_spec_t *cipher;
54  cipher_extra_spec_t *extraspec;
55  unsigned int algorithm;
56  int fips_allowed;
57} cipher_table[] =
58  {
59#if USE_BLOWFISH
60    { &_gcry_cipher_spec_blowfish,
61      &dummy_extra_spec,                  GCRY_CIPHER_BLOWFISH },
62#endif
63#if USE_DES
64    { &_gcry_cipher_spec_des,
65      &dummy_extra_spec,                  GCRY_CIPHER_DES },
66    { &_gcry_cipher_spec_tripledes,
67      &_gcry_cipher_extraspec_tripledes,  GCRY_CIPHER_3DES, 1 },
68#endif
69#if USE_ARCFOUR
70    { &_gcry_cipher_spec_arcfour,
71      &dummy_extra_spec,                  GCRY_CIPHER_ARCFOUR },
72#endif
73#if USE_CAST5
74    { &_gcry_cipher_spec_cast5,
75      &dummy_extra_spec,                  GCRY_CIPHER_CAST5 },
76#endif
77#if USE_AES
78    { &_gcry_cipher_spec_aes,
79      &_gcry_cipher_extraspec_aes,        GCRY_CIPHER_AES,    1 },
80    { &_gcry_cipher_spec_aes192,
81      &_gcry_cipher_extraspec_aes192,     GCRY_CIPHER_AES192, 1 },
82    { &_gcry_cipher_spec_aes256,
83      &_gcry_cipher_extraspec_aes256,     GCRY_CIPHER_AES256, 1 },
84#endif
85#if USE_TWOFISH
86    { &_gcry_cipher_spec_twofish,
87      &dummy_extra_spec,                  GCRY_CIPHER_TWOFISH },
88    { &_gcry_cipher_spec_twofish128,
89      &dummy_extra_spec,                  GCRY_CIPHER_TWOFISH128 },
90#endif
91#if USE_SERPENT
92    { &_gcry_cipher_spec_serpent128,
93      &dummy_extra_spec,                  GCRY_CIPHER_SERPENT128 },
94    { &_gcry_cipher_spec_serpent192,
95      &dummy_extra_spec,                  GCRY_CIPHER_SERPENT192 },
96    { &_gcry_cipher_spec_serpent256,
97      &dummy_extra_spec,                  GCRY_CIPHER_SERPENT256 },
98#endif
99#if USE_RFC2268
100    { &_gcry_cipher_spec_rfc2268_40,
101      &dummy_extra_spec,                  GCRY_CIPHER_RFC2268_40 },
102#endif
103#if USE_SEED
104    { &_gcry_cipher_spec_seed,
105      &dummy_extra_spec,                  GCRY_CIPHER_SEED },
106#endif
107#if USE_CAMELLIA
108    { &_gcry_cipher_spec_camellia128,
109      &dummy_extra_spec,                  GCRY_CIPHER_CAMELLIA128 },
110    { &_gcry_cipher_spec_camellia192,
111      &dummy_extra_spec,                  GCRY_CIPHER_CAMELLIA192 },
112    { &_gcry_cipher_spec_camellia256,
113      &dummy_extra_spec,                  GCRY_CIPHER_CAMELLIA256 },
114#endif
115    { NULL                    }
116  };
117
118/* List of registered ciphers.  */
119static gcry_module_t ciphers_registered;
120
121/* This is the lock protecting CIPHERS_REGISTERED.  */
122static ath_mutex_t ciphers_registered_lock = ATH_MUTEX_INITIALIZER;
123
124/* Flag to check whether the default ciphers have already been
125   registered.  */
126static int default_ciphers_registered;
127
128/* Convenient macro for registering the default ciphers.  */
129#define REGISTER_DEFAULT_CIPHERS                   \
130  do                                               \
131    {                                              \
132      ath_mutex_lock (&ciphers_registered_lock);   \
133      if (! default_ciphers_registered)            \
134        {                                          \
135          cipher_register_default ();              \
136          default_ciphers_registered = 1;          \
137        }                                          \
138      ath_mutex_unlock (&ciphers_registered_lock); \
139    }                                              \
140  while (0)
141
142
143/* A VIA processor with the Padlock engine as well as the Intel AES_NI
144   instructions requires an alignment of most data on a 16 byte
145   boundary.  Because we trick out the compiler while allocating the
146   context, the align attribute as used in rijndael.c does not work on
147   its own.  Thus we need to make sure that the entire context
148   structure is aligned on that boundary.  We achieve this by
149   defining a new type and using that instead of our usual alignment
150   type.  */
151typedef union
152{
153  PROPERLY_ALIGNED_TYPE foo;
154#ifdef NEED_16BYTE_ALIGNED_CONTEXT
155  char bar[16] __attribute__ ((aligned (16)));
156#endif
157  char c[1];
158} cipher_context_alignment_t;
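/* Illustrative sketch (not part of the original source): when
   NEED_16BYTE_ALIGNED_CONTEXT is defined, gcry_cipher_open below adds
   15 spare bytes to the allocation size and then rounds the returned
   pointer up to the next 16 byte boundary, roughly:

     h = gcry_calloc (1, size);         size already includes 15 spare bytes
     off = ((unsigned long)h & 0x0f)? 16 - ((unsigned long)h & 0x0f) : 0;
     h = (void*)((char*)h + off);       now 16 byte aligned

   The offset is recorded in handle_offset so that gcry_cipher_close
   can reconstruct and free the original malloced block.  */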
159
160
161/* The handle structure.  */
162struct gcry_cipher_handle
163{
164  int magic;
165  size_t actual_handle_size;     /* Allocated size of this handle. */
166  size_t handle_offset;          /* Offset to the malloced block.  */
167  gcry_cipher_spec_t *cipher;
168  cipher_extra_spec_t *extraspec;
169  gcry_module_t module;
170
171  /* The algorithm id.  This is a hack required because the module
172     interface does not easily allow retrieving this value. */
173  int algo;
174
175  /* A structure with function pointers for bulk operations.  Due to
176     limitations of the module system (we don't want to change the
177     API) we need to keep these function pointers here.  The cipher
178     open function initializes them and the actual encryption routines
179     use them if they are not NULL.  */
180  struct {
181    void (*cfb_enc)(void *context, unsigned char *iv,
182                    void *outbuf_arg, const void *inbuf_arg,
183                    unsigned int nblocks);
184    void (*cfb_dec)(void *context, unsigned char *iv,
185                    void *outbuf_arg, const void *inbuf_arg,
186                    unsigned int nblocks);
187    void (*cbc_enc)(void *context, unsigned char *iv,
188                    void *outbuf_arg, const void *inbuf_arg,
189                    unsigned int nblocks, int cbc_mac);
190    void (*cbc_dec)(void *context, unsigned char *iv,
191                    void *outbuf_arg, const void *inbuf_arg,
192                    unsigned int nblocks);
193    void (*ctr_enc)(void *context, unsigned char *iv,
194                    void *outbuf_arg, const void *inbuf_arg,
195                    unsigned int nblocks);
196  } bulk;
197
198
199  int mode;
200  unsigned int flags;
201
202  struct {
203    unsigned int key:1; /* Set to 1 if a key has been set.  */
204    unsigned int iv:1;  /* Set to 1 if an IV has been set.  */
205  } marks;
206
207  /* The initialization vector.  For best performance we make sure
208     that it is properly aligned.  In particular some implementations
209     of bulk operations expect a 16 byte aligned IV.  */
210  union {
211    cipher_context_alignment_t iv_align;
212    unsigned char iv[MAX_BLOCKSIZE];
213  } u_iv;
214
215  /* The counter for CTR mode.  This field is also used by AESWRAP and
216     thus we can't use the U_IV union.  */
217  union {
218    cipher_context_alignment_t iv_align;
219    unsigned char ctr[MAX_BLOCKSIZE];
220  } u_ctr;
221
222  /* Space to save an IV or CTR for chaining operations.  */
223  unsigned char lastiv[MAX_BLOCKSIZE];
224  int unused;  /* Number of unused bytes in LASTIV. */
225
226  /* What follows are two contexts of the cipher in use.  The first
227     one needs to be aligned well enough for the cipher operation
228     whereas the second one is a copy created by cipher_setkey and
229     used by cipher_reset.  That second copy has no need for proper
230     alignment because it is only accessed by memcpy.  */
231  cipher_context_alignment_t context;
232};
233
234
235
236/* These dummy functions are used in case a cipher implementation
237   refuses to provide its own functions.  */
238
239static gcry_err_code_t
240dummy_setkey (void *c, const unsigned char *key, unsigned int keylen)
241{
242  (void)c;
243  (void)key;
244  (void)keylen;
245  return GPG_ERR_NO_ERROR;
246}
247
248static void
249dummy_encrypt_block (void *c,
250		     unsigned char *outbuf, const unsigned char *inbuf)
251{
252  (void)c;
253  (void)outbuf;
254  (void)inbuf;
255  BUG();
256}
257
258static void
259dummy_decrypt_block (void *c,
260		     unsigned char *outbuf, const unsigned char *inbuf)
261{
262  (void)c;
263  (void)outbuf;
264  (void)inbuf;
265  BUG();
266}
267
268static void
269dummy_encrypt_stream (void *c,
270		      unsigned char *outbuf, const unsigned char *inbuf,
271		      unsigned int n)
272{
273  (void)c;
274  (void)outbuf;
275  (void)inbuf;
276  (void)n;
277  BUG();
278}
279
280static void
281dummy_decrypt_stream (void *c,
282		      unsigned char *outbuf, const unsigned char *inbuf,
283		      unsigned int n)
284{
285  (void)c;
286  (void)outbuf;
287  (void)inbuf;
288  (void)n;
289  BUG();
290}
291
292
293/* Internal function.  Register all the ciphers included in
294   CIPHER_TABLE.  Note that this function is only used by the macro
295   REGISTER_DEFAULT_CIPHERS which protects it using a mutex. */
296static void
297cipher_register_default (void)
298{
299  gcry_err_code_t err = GPG_ERR_NO_ERROR;
300  int i;
301
302  for (i = 0; !err && cipher_table[i].cipher; i++)
303    {
304      if (! cipher_table[i].cipher->setkey)
305	cipher_table[i].cipher->setkey = dummy_setkey;
306      if (! cipher_table[i].cipher->encrypt)
307	cipher_table[i].cipher->encrypt = dummy_encrypt_block;
308      if (! cipher_table[i].cipher->decrypt)
309	cipher_table[i].cipher->decrypt = dummy_decrypt_block;
310      if (! cipher_table[i].cipher->stencrypt)
311	cipher_table[i].cipher->stencrypt = dummy_encrypt_stream;
312      if (! cipher_table[i].cipher->stdecrypt)
313	cipher_table[i].cipher->stdecrypt = dummy_decrypt_stream;
314
315      if ( fips_mode () && !cipher_table[i].fips_allowed )
316        continue;
317
318      err = _gcry_module_add (&ciphers_registered,
319			      cipher_table[i].algorithm,
320			      (void *) cipher_table[i].cipher,
321			      (void *) cipher_table[i].extraspec,
322			      NULL);
323    }
324
325  if (err)
326    BUG ();
327}
328
329/* Internal callback function.  Used via _gcry_module_lookup.  */
330static int
331gcry_cipher_lookup_func_name (void *spec, void *data)
332{
333  gcry_cipher_spec_t *cipher = (gcry_cipher_spec_t *) spec;
334  char *name = (char *) data;
335  const char **aliases = cipher->aliases;
336  int i, ret = ! stricmp (name, cipher->name);
337
338  if (aliases)
339    for (i = 0; aliases[i] && (! ret); i++)
340      ret = ! stricmp (name, aliases[i]);
341
342  return ret;
343}
344
345/* Internal callback function.  Used via _gcry_module_lookup.  */
346static int
347gcry_cipher_lookup_func_oid (void *spec, void *data)
348{
349  gcry_cipher_spec_t *cipher = (gcry_cipher_spec_t *) spec;
350  char *oid = (char *) data;
351  gcry_cipher_oid_spec_t *oid_specs = cipher->oids;
352  int ret = 0, i;
353
354  if (oid_specs)
355    for (i = 0; oid_specs[i].oid && (! ret); i++)
356      if (! stricmp (oid, oid_specs[i].oid))
357	ret = 1;
358
359  return ret;
360}
361
362/* Internal function.  Look up a cipher entry by its name.  */
363static gcry_module_t
364gcry_cipher_lookup_name (const char *name)
365{
366  gcry_module_t cipher;
367
368  cipher = _gcry_module_lookup (ciphers_registered, (void *) name,
369				gcry_cipher_lookup_func_name);
370
371  return cipher;
372}
373
374/* Internal function.  Look up a cipher entry by its OID.  */
375static gcry_module_t
376gcry_cipher_lookup_oid (const char *oid)
377{
378  gcry_module_t cipher;
379
380  cipher = _gcry_module_lookup (ciphers_registered, (void *) oid,
381				gcry_cipher_lookup_func_oid);
382
383  return cipher;
384}
385
386/* Register a new cipher module whose specification can be found in
387   CIPHER.  On success, a new algorithm ID is stored in ALGORITHM_ID
388   and a pointer representing this module is stored in MODULE.  */
389gcry_error_t
390_gcry_cipher_register (gcry_cipher_spec_t *cipher,
391                       cipher_extra_spec_t *extraspec,
392                       int *algorithm_id,
393                       gcry_module_t *module)
394{
395  gcry_err_code_t err = 0;
396  gcry_module_t mod;
397
398  /* We do not support module loading in fips mode.  */
399  if (fips_mode ())
400    return gpg_error (GPG_ERR_NOT_SUPPORTED);
401
402  ath_mutex_lock (&ciphers_registered_lock);
403  err = _gcry_module_add (&ciphers_registered, 0,
404			  (void *)cipher,
405			  (void *)(extraspec? extraspec : &dummy_extra_spec),
406                          &mod);
407  ath_mutex_unlock (&ciphers_registered_lock);
408
409  if (! err)
410    {
411      *module = mod;
412      *algorithm_id = mod->mod_id;
413    }
414
415  return gcry_error (err);
416}
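/* Example use of the registration interface (illustrative only;
   "mycipher_spec" stands for a gcry_cipher_spec_t supplied by the
   caller and is not part of Libgcrypt):

     int algo_id;
     gcry_module_t mod;
     gcry_error_t err;

     err = _gcry_cipher_register (&mycipher_spec, NULL, &algo_id, &mod);

   Passing NULL for the extra spec makes the module fall back to the
   dummy extra spec; the freshly assigned algorithm id is returned in
   ALGO_ID.  As noted above, registration is refused in fips mode.  */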
417
418/* Unregister the cipher identified by MODULE, which must have been
419   registered with gcry_cipher_register.  */
420void
421gcry_cipher_unregister (gcry_module_t module)
422{
423  ath_mutex_lock (&ciphers_registered_lock);
424  _gcry_module_release (module);
425  ath_mutex_unlock (&ciphers_registered_lock);
426}
427
428/* Locate the OID in the OID table and return 1 if it was found and 0
429   otherwise.  An optional "oid." or "OID." prefix in OID is ignored;
430   the OID is expected to be in standard IETF dotted notation.  The
431   internal algorithm number is returned in ALGORITHM unless it is
432   passed as NULL.  A pointer to the specification of the module
433   implementing this algorithm is returned in OID_SPEC unless passed as
434   NULL.  */
435static int
436search_oid (const char *oid, int *algorithm, gcry_cipher_oid_spec_t *oid_spec)
437{
438  gcry_module_t module;
439  int ret = 0;
440
441  if (oid && ((! strncmp (oid, "oid.", 4))
442	      || (! strncmp (oid, "OID.", 4))))
443    oid += 4;
444
445  module = gcry_cipher_lookup_oid (oid);
446  if (module)
447    {
448      gcry_cipher_spec_t *cipher = module->spec;
449      int i;
450
451      for (i = 0; cipher->oids[i].oid && !ret; i++)
452	if (! stricmp (oid, cipher->oids[i].oid))
453	  {
454	    if (algorithm)
455	      *algorithm = module->mod_id;
456	    if (oid_spec)
457	      *oid_spec = cipher->oids[i];
458	    ret = 1;
459	  }
460      _gcry_module_release (module);
461    }
462
463  return ret;
464}
465
466/* Map STRING to the cipher algorithm identifier.  Returns the
467   algorithm ID of the cipher for the given name or 0 if the name is
468   not known.  It is valid to pass NULL for STRING which results in a
469   return value of 0. */
470int
471gcry_cipher_map_name (const char *string)
472{
473  gcry_module_t cipher;
474  int ret, algorithm = 0;
475
476  if (! string)
477    return 0;
478
479  REGISTER_DEFAULT_CIPHERS;
480
481  /* If the string starts with a digit (optionally prefixed with
482     either "OID." or "oid."), we first look into our table of ASN.1
483     object identifiers to figure out the algorithm */
484
485  ath_mutex_lock (&ciphers_registered_lock);
486
487  ret = search_oid (string, &algorithm, NULL);
488  if (! ret)
489    {
490      cipher = gcry_cipher_lookup_name (string);
491      if (cipher)
492	{
493	  algorithm = cipher->mod_id;
494	  _gcry_module_release (cipher);
495	}
496    }
497
498  ath_mutex_unlock (&ciphers_registered_lock);
499
500  return algorithm;
501}
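/* Example (illustrative only): resolving an algorithm by name or by
   ASN.1 OID with the function above:

     int algo;

     algo = gcry_cipher_map_name ("aes256");
     algo = gcry_cipher_map_name ("oid.2.16.840.1.101.3.4.1.42");

   Name matching is case-insensitive and also covers the aliases of a
   cipher; the second call assumes that the AES-256 module registers
   this OID in its OID table.  */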
502
503
504/* Given a STRING with an OID in dotted decimal notation, this
505   function returns the cipher mode (GCRY_CIPHER_MODE_*) associated
506   with that OID or 0 if no mode is known.  Passing NULL for string
507   yields a return value of 0. */
508int
509gcry_cipher_mode_from_oid (const char *string)
510{
511  gcry_cipher_oid_spec_t oid_spec;
512  int ret = 0, mode = 0;
513
514  if (!string)
515    return 0;
516
517  ath_mutex_lock (&ciphers_registered_lock);
518  ret = search_oid (string, NULL, &oid_spec);
519  if (ret)
520    mode = oid_spec.mode;
521  ath_mutex_unlock (&ciphers_registered_lock);
522
523  return mode;
524}
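/* Example (illustrative only, assuming the AES module registers the
   usual NIST OIDs): for the aes128-CBC OID the function above yields
   the CBC mode constant:

     int mode = gcry_cipher_mode_from_oid ("2.16.840.1.101.3.4.1.2");

   MODE is GCRY_CIPHER_MODE_CBC when the OID is known and 0 otherwise.  */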
525
526
527/* Map the cipher algorithm whose ID is contained in ALGORITHM to a
528   string representation of the algorithm name.  For unknown algorithm
529   IDs this function returns "?".  */
530static const char *
531cipher_algo_to_string (int algorithm)
532{
533  gcry_module_t cipher;
534  const char *name;
535
536  REGISTER_DEFAULT_CIPHERS;
537
538  ath_mutex_lock (&ciphers_registered_lock);
539  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
540  if (cipher)
541    {
542      name = ((gcry_cipher_spec_t *) cipher->spec)->name;
543      _gcry_module_release (cipher);
544    }
545  else
546    name = "?";
547  ath_mutex_unlock (&ciphers_registered_lock);
548
549  return name;
550}
551
552/* Map the cipher algorithm identifier ALGORITHM to a string
553   representing this algorithm.  This string is the default name as
554   used by Libgcrypt.  A pointer to the string "?" is returned for
555   an unknown algorithm.  NULL is never returned. */
556const char *
557gcry_cipher_algo_name (int algorithm)
558{
559  return cipher_algo_to_string (algorithm);
560}
561
562
563/* Flag the cipher algorithm with the identifier ALGORITHM as
564   disabled.  There is no error return; the function does nothing for
565   unknown algorithms.  Disabled algorithms are virtually not available
566   in Libgcrypt. */
567static void
568disable_cipher_algo (int algorithm)
569{
570  gcry_module_t cipher;
571
572  REGISTER_DEFAULT_CIPHERS;
573
574  ath_mutex_lock (&ciphers_registered_lock);
575  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
576  if (cipher)
577    {
578      if (! (cipher->flags & FLAG_MODULE_DISABLED))
579	cipher->flags |= FLAG_MODULE_DISABLED;
580      _gcry_module_release (cipher);
581    }
582  ath_mutex_unlock (&ciphers_registered_lock);
583}
584
585
586/* Return 0 if the cipher algorithm with identifier ALGORITHM is
587   available. Returns a basic error code value if it is not
588   available.  */
589static gcry_err_code_t
590check_cipher_algo (int algorithm)
591{
592  gcry_err_code_t err = GPG_ERR_NO_ERROR;
593  gcry_module_t cipher;
594
595  REGISTER_DEFAULT_CIPHERS;
596
597  ath_mutex_lock (&ciphers_registered_lock);
598  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
599  if (cipher)
600    {
601      if (cipher->flags & FLAG_MODULE_DISABLED)
602	err = GPG_ERR_CIPHER_ALGO;
603      _gcry_module_release (cipher);
604    }
605  else
606    err = GPG_ERR_CIPHER_ALGO;
607  ath_mutex_unlock (&ciphers_registered_lock);
608
609  return err;
610}
611
612
613/* Return the standard length in bits of the key for the cipher
614   algorithm with the identifier ALGORITHM.  */
615static unsigned int
616cipher_get_keylen (int algorithm)
617{
618  gcry_module_t cipher;
619  unsigned len = 0;
620
621  REGISTER_DEFAULT_CIPHERS;
622
623  ath_mutex_lock (&ciphers_registered_lock);
624  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
625  if (cipher)
626    {
627      len = ((gcry_cipher_spec_t *) cipher->spec)->keylen;
628      if (!len)
629	log_bug ("cipher %d w/o key length\n", algorithm);
630      _gcry_module_release (cipher);
631    }
632  ath_mutex_unlock (&ciphers_registered_lock);
633
634  return len;
635}
636
637/* Return the block length of the cipher algorithm with the identifier
638   ALGORITHM.  This function returns 0 for an invalid algorithm.  */
639static unsigned int
640cipher_get_blocksize (int algorithm)
641{
642  gcry_module_t cipher;
643  unsigned len = 0;
644
645  REGISTER_DEFAULT_CIPHERS;
646
647  ath_mutex_lock (&ciphers_registered_lock);
648  cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
649  if (cipher)
650    {
651      len = ((gcry_cipher_spec_t *) cipher->spec)->blocksize;
652      if (! len)
653	  log_bug ("cipher %d w/o blocksize\n", algorithm);
654      _gcry_module_release (cipher);
655    }
656  ath_mutex_unlock (&ciphers_registered_lock);
657
658  return len;
659}
660
661
662/*
663   Open a cipher handle for use with cipher algorithm ALGORITHM, using
664   the cipher mode MODE (one of the GCRY_CIPHER_MODE_*) and return a
665   handle in HANDLE.  Put NULL into HANDLE and return an error code if
666   something goes wrong.  FLAGS may be used to modify the
667   operation.  The defined flags are:
668
669   GCRY_CIPHER_SECURE:  allocate all internal buffers in secure memory.
670   GCRY_CIPHER_ENABLE_SYNC:  Enable the sync operation as used in OpenPGP.
671   GCRY_CIPHER_CBC_CTS:  Enable CTS mode.
672   GCRY_CIPHER_CBC_MAC:  Enable MAC mode.
673
674   Values for these flags may be combined using OR.
675 */
676gcry_error_t
677gcry_cipher_open (gcry_cipher_hd_t *handle,
678		  int algo, int mode, unsigned int flags)
679{
680  int secure = (flags & GCRY_CIPHER_SECURE);
681  gcry_cipher_spec_t *cipher = NULL;
682  cipher_extra_spec_t *extraspec = NULL;
683  gcry_module_t module = NULL;
684  gcry_cipher_hd_t h = NULL;
685  gcry_err_code_t err = 0;
686
687  /* If the application failed to call the random poll function, we do
688     it here to ensure that it is used once in a while. */
689  _gcry_fast_random_poll ();
690
691  REGISTER_DEFAULT_CIPHERS;
692
693  /* Fetch the corresponding module and check whether the cipher is marked
694     available for use.  */
695  ath_mutex_lock (&ciphers_registered_lock);
696  module = _gcry_module_lookup_id (ciphers_registered, algo);
697  if (module)
698    {
699      /* Found module.  */
700
701      if (module->flags & FLAG_MODULE_DISABLED)
702	{
703	  /* Not available for use.  */
704	  err = GPG_ERR_CIPHER_ALGO;
705	}
706      else
707        {
708          cipher = (gcry_cipher_spec_t *) module->spec;
709          extraspec = module->extraspec;
710        }
711    }
712  else
713    err = GPG_ERR_CIPHER_ALGO;
714  ath_mutex_unlock (&ciphers_registered_lock);
715
716  /* check flags */
717  if ((! err)
718      && ((flags & ~(0
719		     | GCRY_CIPHER_SECURE
720		     | GCRY_CIPHER_ENABLE_SYNC
721		     | GCRY_CIPHER_CBC_CTS
722		     | GCRY_CIPHER_CBC_MAC))
723	  || (flags & GCRY_CIPHER_CBC_CTS & GCRY_CIPHER_CBC_MAC)))
724    err = GPG_ERR_CIPHER_ALGO;
725
726  /* check that a valid mode has been requested */
727  if (! err)
728    switch (mode)
729      {
730      case GCRY_CIPHER_MODE_ECB:
731      case GCRY_CIPHER_MODE_CBC:
732      case GCRY_CIPHER_MODE_CFB:
733      case GCRY_CIPHER_MODE_OFB:
734      case GCRY_CIPHER_MODE_CTR:
735      case GCRY_CIPHER_MODE_AESWRAP:
736	if ((cipher->encrypt == dummy_encrypt_block)
737	    || (cipher->decrypt == dummy_decrypt_block))
738	  err = GPG_ERR_INV_CIPHER_MODE;
739	break;
740
741      case GCRY_CIPHER_MODE_STREAM:
742	if ((cipher->stencrypt == dummy_encrypt_stream)
743	    || (cipher->stdecrypt == dummy_decrypt_stream))
744	  err = GPG_ERR_INV_CIPHER_MODE;
745	break;
746
747      case GCRY_CIPHER_MODE_NONE:
748        /* This mode may be used for debugging.  It copies the plain
749           text verbatim to the ciphertext.  We do not allow this in
750           fips mode or if no debug flag has been set.  */
751	if (fips_mode () || !_gcry_get_debug_flag (0))
752          err = GPG_ERR_INV_CIPHER_MODE;
753	break;
754
755      default:
756	err = GPG_ERR_INV_CIPHER_MODE;
757      }
758
759  /* Perform selftest here and mark this with a flag in cipher_table?
760     No, we should not do this as it takes too long.  Further it does
761     not make sense to exclude algorithms with failing selftests at
762     runtime: If a selftest fails there is something seriously wrong
763     with the system and thus we better die immediately. */
764
765  if (! err)
766    {
767      size_t size = (sizeof (*h)
768                     + 2 * cipher->contextsize
769                     - sizeof (cipher_context_alignment_t)
770#ifdef NEED_16BYTE_ALIGNED_CONTEXT
771                     + 15  /* Space for leading alignment gap.  */
772#endif /*NEED_16BYTE_ALIGNED_CONTEXT*/
773                     );
774
775      if (secure)
776	h = gcry_calloc_secure (1, size);
777      else
778	h = gcry_calloc (1, size);
779
780      if (! h)
781	err = gpg_err_code_from_syserror ();
782      else
783	{
784          size_t off = 0;
785
786#ifdef NEED_16BYTE_ALIGNED_CONTEXT
787          if ( ((unsigned long)h & 0x0f) )
788            {
789              /* The malloced block is not aligned on a 16 byte
790                 boundary.  Correct for this.  */
791              off = 16 - ((unsigned long)h & 0x0f);
792              h = (void*)((char*)h + off);
793            }
794#endif /*NEED_16BYTE_ALIGNED_CONTEXT*/
795
796	  h->magic = secure ? CTX_MAGIC_SECURE : CTX_MAGIC_NORMAL;
797          h->actual_handle_size = size - off;
798          h->handle_offset = off;
799	  h->cipher = cipher;
800	  h->extraspec = extraspec;
801	  h->module = module;
802          h->algo = algo;
803	  h->mode = mode;
804	  h->flags = flags;
805
806          /* Setup bulk encryption routines.  */
807          switch (algo)
808            {
809#ifdef USE_AES
810            case GCRY_CIPHER_AES128:
811            case GCRY_CIPHER_AES192:
812            case GCRY_CIPHER_AES256:
813              h->bulk.cfb_enc = _gcry_aes_cfb_enc;
814              h->bulk.cfb_dec = _gcry_aes_cfb_dec;
815              h->bulk.cbc_enc = _gcry_aes_cbc_enc;
816              h->bulk.cbc_dec = _gcry_aes_cbc_dec;
817              h->bulk.ctr_enc = _gcry_aes_ctr_enc;
818              break;
819#endif /*USE_AES*/
820
821            default:
822              break;
823            }
824	}
825    }
826
827  /* Done.  */
828
829  if (err)
830    {
831      if (module)
832	{
833	  /* Release module.  */
834	  ath_mutex_lock (&ciphers_registered_lock);
835	  _gcry_module_release (module);
836	  ath_mutex_unlock (&ciphers_registered_lock);
837	}
838    }
839
840  *handle = err ? NULL : h;
841
842  return gcry_error (err);
843}
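/* Example usage (illustrative only): open an AES-256 handle in CBC
   mode with ciphertext stealing and a context in secure memory,
   combining the flags described above:

     gcry_cipher_hd_t hd;
     gcry_error_t err;

     err = gcry_cipher_open (&hd, GCRY_CIPHER_AES256, GCRY_CIPHER_MODE_CBC,
                             GCRY_CIPHER_SECURE | GCRY_CIPHER_CBC_CTS);
     if (err)
       ...;                              HD is set to NULL on error
     ...
     gcry_cipher_close (hd);
*/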
844
845
846/* Release all resources associated with the cipher handle H. H may be
847   NULL in which case this is a no-operation. */
848void
849gcry_cipher_close (gcry_cipher_hd_t h)
850{
851  size_t off;
852
853  if (!h)
854    return;
855
856  if ((h->magic != CTX_MAGIC_SECURE)
857      && (h->magic != CTX_MAGIC_NORMAL))
858    _gcry_fatal_error(GPG_ERR_INTERNAL,
859		      "gcry_cipher_close: already closed/invalid handle");
860  else
861    h->magic = 0;
862
863  /* Release module.  */
864  ath_mutex_lock (&ciphers_registered_lock);
865  _gcry_module_release (h->module);
866  ath_mutex_unlock (&ciphers_registered_lock);
867
868  /* We always want to wipe out the memory even when the context has
869     been allocated in secure memory.  The user might have disabled
870     secure memory or is using his own implementation which does not
871     do the wiping.  To accomplish this we need to keep track of the
872     actual size of this structure because we have no way to know
873     how large the allocated area was when using a standard malloc. */
874  off = h->handle_offset;
875  wipememory (h, h->actual_handle_size);
876
877  gcry_free ((char*)h - off);
878}
879
880
881/* Set the key to be used for the encryption context C to KEY with
882   length KEYLEN.  The length should match the required length. */
883static gcry_error_t
884cipher_setkey (gcry_cipher_hd_t c, byte *key, unsigned int keylen)
885{
886  gcry_err_code_t ret;
887
888  ret = (*c->cipher->setkey) (&c->context.c, key, keylen);
889  if (!ret)
890    {
891      /* Duplicate initial context.  */
892      memcpy ((void *) ((char *) &c->context.c + c->cipher->contextsize),
893              (void *) &c->context.c,
894              c->cipher->contextsize);
895      c->marks.key = 1;
896    }
897  else
898    c->marks.key = 0;
899
900  return gcry_error (ret);
901}
902
903
904/* Set the IV to be used for the encryption context C to IV with
905   length IVLEN.  The length should match the required length. */
906static void
907cipher_setiv( gcry_cipher_hd_t c, const byte *iv, unsigned ivlen )
908{
909  memset (c->u_iv.iv, 0, c->cipher->blocksize);
910  if (iv)
911    {
912      if (ivlen != c->cipher->blocksize)
913        {
914          log_info ("WARNING: cipher_setiv: ivlen=%u blklen=%u\n",
915                    ivlen, (unsigned int)c->cipher->blocksize);
916          fips_signal_error ("IV length does not match blocklength");
917        }
918      if (ivlen > c->cipher->blocksize)
919        ivlen = c->cipher->blocksize;
920      memcpy (c->u_iv.iv, iv, ivlen);
921      c->marks.iv = 1;
922    }
923  else
924      c->marks.iv = 0;
925  c->unused = 0;
926}
927
928
929/* Reset the cipher context to the initial context.  This is basically
930   the same as a release followed by a new open. */
931static void
932cipher_reset (gcry_cipher_hd_t c)
933{
934  memcpy (&c->context.c,
935	  (char *) &c->context.c + c->cipher->contextsize,
936	  c->cipher->contextsize);
937  memset (&c->marks, 0, sizeof c->marks);
938  memset (c->u_iv.iv, 0, c->cipher->blocksize);
939  memset (c->lastiv, 0, c->cipher->blocksize);
940  memset (c->u_ctr.ctr, 0, c->cipher->blocksize);
941}
942
943
944
945static gcry_err_code_t
946do_ecb_encrypt (gcry_cipher_hd_t c,
947                unsigned char *outbuf, unsigned int outbuflen,
948                const unsigned char *inbuf, unsigned int inbuflen)
949{
950  unsigned int blocksize = c->cipher->blocksize;
951  unsigned int n, nblocks;
952
953  if (outbuflen < inbuflen)
954    return GPG_ERR_BUFFER_TOO_SHORT;
955  if ((inbuflen % blocksize))
956    return GPG_ERR_INV_LENGTH;
957
958  nblocks = inbuflen / c->cipher->blocksize;
959
960  for (n=0; n < nblocks; n++ )
961    {
962      c->cipher->encrypt (&c->context.c, outbuf, (byte*)/*arggg*/inbuf);
963      inbuf  += blocksize;
964      outbuf += blocksize;
965    }
966  return 0;
967}
968
969static gcry_err_code_t
970do_ecb_decrypt (gcry_cipher_hd_t c,
971                unsigned char *outbuf, unsigned int outbuflen,
972                const unsigned char *inbuf, unsigned int inbuflen)
973{
974  unsigned int blocksize = c->cipher->blocksize;
975  unsigned int n, nblocks;
976
977  if (outbuflen < inbuflen)
978    return GPG_ERR_BUFFER_TOO_SHORT;
979  if ((inbuflen % blocksize))
980    return GPG_ERR_INV_LENGTH;
981  nblocks = inbuflen / c->cipher->blocksize;
982
983  for (n=0; n < nblocks; n++ )
984    {
985      c->cipher->decrypt (&c->context.c, outbuf, (byte*)/*arggg*/inbuf );
986      inbuf  += blocksize;
987      outbuf += blocksize;
988    }
989
990  return 0;
991}
992
993
994static gcry_err_code_t
995do_cbc_encrypt (gcry_cipher_hd_t c,
996                unsigned char *outbuf, unsigned int outbuflen,
997                const unsigned char *inbuf, unsigned int inbuflen)
998{
999  unsigned int n;
1000  unsigned char *ivp;
1001  int i;
1002  size_t blocksize = c->cipher->blocksize;
1003  unsigned nblocks = inbuflen / blocksize;
1004
1005  if (outbuflen < ((c->flags & GCRY_CIPHER_CBC_MAC)? blocksize : inbuflen))
1006    return GPG_ERR_BUFFER_TOO_SHORT;
1007
1008  if ((inbuflen % c->cipher->blocksize)
1009      && !(inbuflen > c->cipher->blocksize
1010           && (c->flags & GCRY_CIPHER_CBC_CTS)))
1011    return GPG_ERR_INV_LENGTH;
1012
1013  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
1014    {
1015      if ((inbuflen % blocksize) == 0)
1016	nblocks--;
1017    }
1018
1019  if (c->bulk.cbc_enc)
1020    {
1021      c->bulk.cbc_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks,
1022                       (c->flags & GCRY_CIPHER_CBC_MAC));
1023      inbuf  += nblocks * blocksize;
1024      if (!(c->flags & GCRY_CIPHER_CBC_MAC))
1025        outbuf += nblocks * blocksize;
1026    }
1027  else
1028    {
1029      for (n=0; n < nblocks; n++ )
1030        {
1031          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1032            outbuf[i] = inbuf[i] ^ *ivp++;
1033          c->cipher->encrypt ( &c->context.c, outbuf, outbuf );
1034          memcpy (c->u_iv.iv, outbuf, blocksize );
1035          inbuf  += blocksize;
1036          if (!(c->flags & GCRY_CIPHER_CBC_MAC))
1037            outbuf += blocksize;
1038        }
1039    }
1040
1041  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
1042    {
1043      /* We have to be careful here, since outbuf might be equal to
1044         inbuf.  */
1045      int restbytes;
1046      unsigned char b;
1047
1048      if ((inbuflen % blocksize) == 0)
1049        restbytes = blocksize;
1050      else
1051        restbytes = inbuflen % blocksize;
1052
1053      outbuf -= blocksize;
1054      for (ivp = c->u_iv.iv, i = 0; i < restbytes; i++)
1055        {
1056          b = inbuf[i];
1057          outbuf[blocksize + i] = outbuf[i];
1058          outbuf[i] = b ^ *ivp++;
1059        }
1060      for (; i < blocksize; i++)
1061        outbuf[i] = 0 ^ *ivp++;
1062
1063      c->cipher->encrypt (&c->context.c, outbuf, outbuf);
1064      memcpy (c->u_iv.iv, outbuf, blocksize);
1065    }
1066
1067  return 0;
1068}
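/* Note on the GCRY_CIPHER_CBC_CTS branch above (descriptive summary,
   not a new algorithm): the trailing, possibly partial, plaintext
   block is zero-padded, XORed with the previous ciphertext block and
   encrypted; that result is emitted as the second-to-last ciphertext
   block, while the previous ciphertext block, truncated to the length
   of the partial plaintext, becomes the last one.  The ciphertext is
   thus exactly as long as the plaintext ("ciphertext stealing").  */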
1069
1070
1071static gcry_err_code_t
1072do_cbc_decrypt (gcry_cipher_hd_t c,
1073                unsigned char *outbuf, unsigned int outbuflen,
1074                const unsigned char *inbuf, unsigned int inbuflen)
1075{
1076  unsigned int n;
1077  unsigned char *ivp;
1078  int i;
1079  size_t blocksize = c->cipher->blocksize;
1080  unsigned int nblocks = inbuflen / blocksize;
1081
1082  if (outbuflen < inbuflen)
1083    return GPG_ERR_BUFFER_TOO_SHORT;
1084
1085  if ((inbuflen % c->cipher->blocksize)
1086      && !(inbuflen > c->cipher->blocksize
1087           && (c->flags & GCRY_CIPHER_CBC_CTS)))
1088    return GPG_ERR_INV_LENGTH;
1089
1090  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
1091    {
1092      nblocks--;
1093      if ((inbuflen % blocksize) == 0)
1094	nblocks--;
1095      memcpy (c->lastiv, c->u_iv.iv, blocksize);
1096    }
1097
1098  if (c->bulk.cbc_dec)
1099    {
1100      c->bulk.cbc_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
1101      inbuf  += nblocks * blocksize;
1102      outbuf += nblocks * blocksize;
1103    }
1104  else
1105    {
1106      for (n=0; n < nblocks; n++ )
1107        {
1108          /* Because outbuf and inbuf might be the same, we have to
1109           * save the original ciphertext block.  We use LASTIV for
1110           * this here because it is not used otherwise. */
1111          memcpy (c->lastiv, inbuf, blocksize);
1112          c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
1113          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1114	    outbuf[i] ^= *ivp++;
1115          memcpy(c->u_iv.iv, c->lastiv, blocksize );
1116          inbuf  += c->cipher->blocksize;
1117          outbuf += c->cipher->blocksize;
1118        }
1119    }
1120
1121  if ((c->flags & GCRY_CIPHER_CBC_CTS) && inbuflen > blocksize)
1122    {
1123      int restbytes;
1124
1125      if ((inbuflen % blocksize) == 0)
1126        restbytes = blocksize;
1127      else
1128        restbytes = inbuflen % blocksize;
1129
1130      memcpy (c->lastiv, c->u_iv.iv, blocksize );         /* Save Cn-2. */
1131      memcpy (c->u_iv.iv, inbuf + blocksize, restbytes ); /* Save Cn. */
1132
1133      c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
1134      for (ivp=c->u_iv.iv,i=0; i < restbytes; i++ )
1135        outbuf[i] ^= *ivp++;
1136
1137      memcpy(outbuf + blocksize, outbuf, restbytes);
1138      for(i=restbytes; i < blocksize; i++)
1139        c->u_iv.iv[i] = outbuf[i];
1140      c->cipher->decrypt (&c->context.c, outbuf, c->u_iv.iv);
1141      for(ivp=c->lastiv,i=0; i < blocksize; i++ )
1142        outbuf[i] ^= *ivp++;
1143      /* c->lastiv is now really lastlastiv, does this matter? */
1144    }
1145
1146  return 0;
1147}
1148
1149
1150static gcry_err_code_t
1151do_cfb_encrypt (gcry_cipher_hd_t c,
1152                unsigned char *outbuf, unsigned int outbuflen,
1153                const unsigned char *inbuf, unsigned int inbuflen)
1154{
1155  unsigned char *ivp;
1156  size_t blocksize = c->cipher->blocksize;
1157  size_t blocksize_x_2 = blocksize + blocksize;
1158
1159  if (outbuflen < inbuflen)
1160    return GPG_ERR_BUFFER_TOO_SHORT;
1161
1162  if ( inbuflen <= c->unused )
1163    {
1164      /* Short enough to be encoded by the remaining XOR mask. */
1165      /* XOR the input with the IV and store input into IV. */
1166      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
1167           inbuflen;
1168           inbuflen--, c->unused-- )
1169        *outbuf++ = (*ivp++ ^= *inbuf++);
1170      return 0;
1171    }
1172
1173  if ( c->unused )
1174    {
1175      /* XOR the input with the IV and store input into IV */
1176      inbuflen -= c->unused;
1177      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
1178        *outbuf++ = (*ivp++ ^= *inbuf++);
1179    }
1180
1181  /* Now we can process complete blocks.  We use a loop as long as we
1182     have at least 2 blocks and use conditions for the rest.  This
1183     also allows us to use a bulk encryption function if available.  */
1184  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_enc)
1185    {
1186      unsigned int nblocks = inbuflen / blocksize;
1187      c->bulk.cfb_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
1188      outbuf += nblocks * blocksize;
1189      inbuf  += nblocks * blocksize;
1190      inbuflen -= nblocks * blocksize;
1191    }
1192  else
1193    {
1194      while ( inbuflen >= blocksize_x_2 )
1195        {
1196          int i;
1197          /* Encrypt the IV. */
1198          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1199          /* XOR the input with the IV and store input into IV.  */
1200          for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1201            *outbuf++ = (*ivp++ ^= *inbuf++);
1202          inbuflen -= blocksize;
1203        }
1204    }
1205
1206  if ( inbuflen >= blocksize )
1207    {
1208      int i;
1209      /* Save the current IV and then encrypt the IV. */
1210      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1211      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1212      /* XOR the input with the IV and store input into IV */
1213      for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1214        *outbuf++ = (*ivp++ ^= *inbuf++);
1215      inbuflen -= blocksize;
1216    }
1217  if ( inbuflen )
1218    {
1219      /* Save the current IV and then encrypt the IV. */
1220      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1221      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1222      c->unused = blocksize;
1223      /* Apply the XOR. */
1224      c->unused -= inbuflen;
1225      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
1226        *outbuf++ = (*ivp++ ^= *inbuf++);
1227    }
1228  return 0;
1229}
1230
1231
1232static gcry_err_code_t
1233do_cfb_decrypt (gcry_cipher_hd_t c,
1234                unsigned char *outbuf, unsigned int outbuflen,
1235                const unsigned char *inbuf, unsigned int inbuflen)
1236{
1237  unsigned char *ivp;
1238  unsigned long temp;
1239  int i;
1240  size_t blocksize = c->cipher->blocksize;
1241  size_t blocksize_x_2 = blocksize + blocksize;
1242
1243  if (outbuflen < inbuflen)
1244    return GPG_ERR_BUFFER_TOO_SHORT;
1245
1246  if (inbuflen <= c->unused)
1247    {
1248      /* Short enough to be encoded by the remaining XOR mask. */
1249      /* XOR the input with the IV and store input into IV. */
1250      for (ivp=c->u_iv.iv+blocksize - c->unused;
1251           inbuflen;
1252           inbuflen--, c->unused--)
1253        {
1254          temp = *inbuf++;
1255          *outbuf++ = *ivp ^ temp;
1256          *ivp++ = temp;
1257        }
1258      return 0;
1259    }
1260
1261  if (c->unused)
1262    {
1263      /* XOR the input with the IV and store input into IV. */
1264      inbuflen -= c->unused;
1265      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
1266        {
1267          temp = *inbuf++;
1268          *outbuf++ = *ivp ^ temp;
1269          *ivp++ = temp;
1270        }
1271    }
1272
1273  /* Now we can process complete blocks.  We use a loop as long as we
1274     have at least 2 blocks and use conditions for the rest.  This
1275     also allows us to use a bulk encryption function if available.  */
1276  if (inbuflen >= blocksize_x_2 && c->bulk.cfb_dec)
1277    {
1278      unsigned int nblocks = inbuflen / blocksize;
1279      c->bulk.cfb_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
1280      outbuf += nblocks * blocksize;
1281      inbuf  += nblocks * blocksize;
1282      inbuflen -= nblocks * blocksize;
1283    }
1284  else
1285    {
1286      while (inbuflen >= blocksize_x_2 )
1287        {
1288          /* Encrypt the IV. */
1289          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1290          /* XOR the input with the IV and store input into IV. */
1291          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1292            {
1293              temp = *inbuf++;
1294              *outbuf++ = *ivp ^ temp;
1295              *ivp++ = temp;
1296            }
1297          inbuflen -= blocksize;
1298        }
1299    }
1300
1301  if (inbuflen >= blocksize )
1302    {
1303      /* Save the current IV and then encrypt the IV. */
1304      memcpy ( c->lastiv, c->u_iv.iv, blocksize);
1305      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1306      /* XOR the input with the IV and store input into IV */
1307      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1308        {
1309          temp = *inbuf++;
1310          *outbuf++ = *ivp ^ temp;
1311          *ivp++ = temp;
1312        }
1313      inbuflen -= blocksize;
1314    }
1315
1316  if (inbuflen)
1317    {
1318      /* Save the current IV and then encrypt the IV. */
1319      memcpy ( c->lastiv, c->u_iv.iv, blocksize );
1320      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1321      c->unused = blocksize;
1322      /* Apply the XOR. */
1323      c->unused -= inbuflen;
1324      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
1325        {
1326          temp = *inbuf++;
1327          *outbuf++ = *ivp ^ temp;
1328          *ivp++ = temp;
1329        }
1330    }
1331  return 0;
1332}
1333
1334
1335static gcry_err_code_t
1336do_ofb_encrypt (gcry_cipher_hd_t c,
1337                unsigned char *outbuf, unsigned int outbuflen,
1338                const unsigned char *inbuf, unsigned int inbuflen)
1339{
1340  unsigned char *ivp;
1341  size_t blocksize = c->cipher->blocksize;
1342
1343  if (outbuflen < inbuflen)
1344    return GPG_ERR_BUFFER_TOO_SHORT;
1345
1346  if ( inbuflen <= c->unused )
1347    {
1348      /* Short enough to be encoded by the remaining XOR mask. */
1349      /* XOR the input with the IV */
1350      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
1351           inbuflen;
1352           inbuflen--, c->unused-- )
1353        *outbuf++ = (*ivp++ ^ *inbuf++);
1354      return 0;
1355    }
1356
1357  if( c->unused )
1358    {
1359      inbuflen -= c->unused;
1360      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
1361        *outbuf++ = (*ivp++ ^ *inbuf++);
1362    }
1363
1364  /* Now we can process complete blocks. */
1365  while ( inbuflen >= blocksize )
1366    {
1367      int i;
1368      /* Encrypt the IV (and save the current one). */
1369      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1370      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1371
1372      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1373        *outbuf++ = (*ivp++ ^ *inbuf++);
1374      inbuflen -= blocksize;
1375    }
1376  if ( inbuflen )
1377    { /* process the remaining bytes */
1378      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1379      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1380      c->unused = blocksize;
1381      c->unused -= inbuflen;
1382      for(ivp=c->u_iv.iv; inbuflen; inbuflen-- )
1383        *outbuf++ = (*ivp++ ^ *inbuf++);
1384    }
1385  return 0;
1386}
1387
1388static gcry_err_code_t
1389do_ofb_decrypt (gcry_cipher_hd_t c,
1390                unsigned char *outbuf, unsigned int outbuflen,
1391                const unsigned char *inbuf, unsigned int inbuflen)
1392{
1393  unsigned char *ivp;
1394  size_t blocksize = c->cipher->blocksize;
1395
1396  if (outbuflen < inbuflen)
1397    return GPG_ERR_BUFFER_TOO_SHORT;
1398
1399  if( inbuflen <= c->unused )
1400    {
1401      /* Short enough to be encoded by the remaining XOR mask. */
1402      for (ivp=c->u_iv.iv+blocksize - c->unused; inbuflen; inbuflen--,c->unused--)
1403        *outbuf++ = *ivp++ ^ *inbuf++;
1404      return 0;
1405    }
1406
1407  if ( c->unused )
1408    {
1409      inbuflen -= c->unused;
1410      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
1411        *outbuf++ = *ivp++ ^ *inbuf++;
1412    }
1413
1414  /* Now we can process complete blocks. */
1415  while ( inbuflen >= blocksize )
1416    {
1417      int i;
1418      /* Encrypt the IV (and save the current one). */
1419      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1420      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1421      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
1422        *outbuf++ = *ivp++ ^ *inbuf++;
1423      inbuflen -= blocksize;
1424    }
1425  if ( inbuflen )
1426    { /* Process the remaining bytes. */
1427      /* Encrypt the IV (and save the current one). */
1428      memcpy( c->lastiv, c->u_iv.iv, blocksize );
1429      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
1430      c->unused = blocksize;
1431      c->unused -= inbuflen;
1432      for (ivp=c->u_iv.iv; inbuflen; inbuflen-- )
1433        *outbuf++ = *ivp++ ^ *inbuf++;
1434    }
1435  return 0;
1436}
1437
1438
1439static gcry_err_code_t
1440do_ctr_encrypt (gcry_cipher_hd_t c,
1441                unsigned char *outbuf, unsigned int outbuflen,
1442                const unsigned char *inbuf, unsigned int inbuflen)
1443{
1444  unsigned int n;
1445  int i;
1446  unsigned int blocksize = c->cipher->blocksize;
1447  unsigned int nblocks;
1448
1449  if (outbuflen < inbuflen)
1450    return GPG_ERR_BUFFER_TOO_SHORT;
1451
1452  /* First process a left over encrypted counter.  */
1453  if (c->unused)
1454    {
1455      gcry_assert (c->unused < blocksize);
1456      i = blocksize - c->unused;
1457      for (n=0; c->unused && n < inbuflen; c->unused--, n++, i++)
1458        {
1459          /* XOR input with encrypted counter and store in output.  */
1460          outbuf[n] = inbuf[n] ^ c->lastiv[i];
1461        }
1462      inbuf  += n;
1463      outbuf += n;
1464      inbuflen -= n;
1465    }
1466
1467
1468  /* Use a bulk method if available.  */
1469  nblocks = inbuflen / blocksize;
1470  if (nblocks && c->bulk.ctr_enc)
1471    {
1472      c->bulk.ctr_enc (&c->context.c, c->u_ctr.ctr, outbuf, inbuf, nblocks);
1473      inbuf  += nblocks * blocksize;
1474      outbuf += nblocks * blocksize;
1475      inbuflen -= nblocks * blocksize;
1476    }
1477
1478  /* If we don't have a bulk method use the standard method.  We also
1479     use this method for a remaining partial block.  */
1480  if (inbuflen)
1481    {
1482      unsigned char tmp[MAX_BLOCKSIZE];
1483
1484      for (n=0; n < inbuflen; n++)
1485        {
1486          if ((n % blocksize) == 0)
1487            {
1488              c->cipher->encrypt (&c->context.c, tmp, c->u_ctr.ctr);
1489
1490              for (i = blocksize; i > 0; i--)
1491                {
1492                  c->u_ctr.ctr[i-1]++;
1493                  if (c->u_ctr.ctr[i-1] != 0)
1494                    break;
1495                }
1496            }
1497
1498          /* XOR input with encrypted counter and store in output.  */
1499          outbuf[n] = inbuf[n] ^ tmp[n % blocksize];
1500        }
1501
1502      /* Save the unused bytes of the counter.  */
1503      n %= blocksize;
1504      c->unused = (blocksize - n) % blocksize;
1505      if (c->unused)
1506        memcpy (c->lastiv+n, tmp+n, c->unused);
1507
1508      wipememory (tmp, sizeof tmp);
1509    }
1510
1511  return 0;
1512}
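/* Note on the CTR handling above (descriptive): u_ctr.ctr is treated
   as one big-endian counter of BLOCKSIZE bytes, i.e. the rightmost
   byte is incremented first and carries propagate to the left; a
   16 byte counter ending in ...00 ff ff is followed by ...01 00 00.
   Unused bytes of the last encrypted counter block are kept in LASTIV
   so that a subsequent call can continue in the middle of a block.  */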
1513
1514static gcry_err_code_t
1515do_ctr_decrypt (gcry_cipher_hd_t c,
1516                unsigned char *outbuf, unsigned int outbuflen,
1517                const unsigned char *inbuf, unsigned int inbuflen)
1518{
1519  return do_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1520}
1521
1522
1523/* Perform the AES-Wrap algorithm as specified by RFC3394.  We
1524   implement this as a mode usable with any cipher algorithm of
1525   blocksize 128.  */
1526static gcry_err_code_t
1527do_aeswrap_encrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
1528                    const byte *inbuf, unsigned int inbuflen )
1529{
1530  int j, x;
1531  unsigned int n, i;
1532  unsigned char *r, *a, *b;
1533  unsigned char t[8];
1534
1535#if MAX_BLOCKSIZE < 8
1536#error Invalid block size
1537#endif
1538  /* We require a cipher with a 128 bit block length.  */
1539  if (c->cipher->blocksize != 16)
1540    return GPG_ERR_INV_LENGTH;
1541
1542  /* The output buffer must be able to hold the input data plus one
1543     additional block.  */
1544  if (outbuflen < inbuflen + 8)
1545    return GPG_ERR_BUFFER_TOO_SHORT;
1546  /* Input data must be multiple of 64 bits.  */
1547  if (inbuflen % 8)
1548    return GPG_ERR_INV_ARG;
1549
1550  n = inbuflen / 8;
1551
1552  /* We need at least two 64 bit blocks.  */
1553  if (n < 2)
1554    return GPG_ERR_INV_ARG;
1555
1556  r = outbuf;
1557  a = outbuf;  /* We store A directly in OUTBUF.  */
1558  b = c->u_ctr.ctr;  /* B is also used to concatenate stuff.  */
1559
1560  /* If an IV has been set we use that IV as the Alternative Initial
1561     Value; if it has not been set we use the standard value.  */
1562  if (c->marks.iv)
1563    memcpy (a, c->u_iv.iv, 8);
1564  else
1565    memset (a, 0xa6, 8);
1566
1567  /* Copy the inbuf to the outbuf. */
1568  memmove (r+8, inbuf, inbuflen);
1569
1570  memset (t, 0, sizeof t); /* t := 0.  */
1571
1572  for (j = 0; j <= 5; j++)
1573    {
1574      for (i = 1; i <= n; i++)
1575        {
1576          /* B := AES_k( A | R[i] ) */
1577          memcpy (b, a, 8);
1578          memcpy (b+8, r+i*8, 8);
1579          c->cipher->encrypt (&c->context.c, b, b);
1580          /* t := t + 1  */
1581	  for (x = 7; x >= 0; x--)
1582	    {
1583	      t[x]++;
1584	      if (t[x])
1585		break;
1586	    }
1587          /* A := MSB_64(B) ^ t */
1588          for (x=0; x < 8; x++)
1589            a[x] = b[x] ^ t[x];
1590          /* R[i] := LSB_64(B) */
1591          memcpy (r+i*8, b+8, 8);
1592        }
1593   }
1594
1595  return 0;
1596}
1597
1598/* Perform the AES-Unwrap algorithm as specified by RFC3394.  We
1599   implement this as a mode usable with any cipher algorithm of
1600   blocksize 128.  */
1601static gcry_err_code_t
1602do_aeswrap_decrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
1603                    const byte *inbuf, unsigned int inbuflen)
1604{
1605  int j, x;
1606  unsigned int n, i;
1607  unsigned char *r, *a, *b;
1608  unsigned char t[8];
1609
1610#if MAX_BLOCKSIZE < 8
1611#error Invalid block size
1612#endif
1613  /* We require a cipher with a 128 bit block length.  */
1614  if (c->cipher->blocksize != 16)
1615    return GPG_ERR_INV_LENGTH;
1616
1617  /* The output buffer must be able to hold the input data minus one
1618     additional block.  Fixme: The caller has more restrictive checks
1619     - we may want to fix them for this mode.  */
1620  if (outbuflen + 8  < inbuflen)
1621    return GPG_ERR_BUFFER_TOO_SHORT;
1622  /* Input data must be multiple of 64 bits.  */
1623  if (inbuflen % 8)
1624    return GPG_ERR_INV_ARG;
1625
1626  n = inbuflen / 8;
1627
1628  /* We need at least three 64 bit blocks.  */
1629  if (n < 3)
1630    return GPG_ERR_INV_ARG;
1631
1632  r = outbuf;
1633  a = c->lastiv;  /* We use c->LASTIV as buffer for A.  */
1634  b = c->u_ctr.ctr;     /* B is also used to concatenate stuff.  */
1635
1636  /* Copy the inbuf to the outbuf and save A. */
1637  memcpy (a, inbuf, 8);
1638  memmove (r, inbuf+8, inbuflen-8);
1639  n--; /* Reduce to actual number of data blocks.  */
1640
1641  /* t := 6 * n  */
1642  i = n * 6;  /* The range is valid because: n = inbuflen / 8 - 1.  */
1643  for (x=0; x < 8 && x < sizeof (i); x++)
1644    t[7-x] = i >> (8*x);
1645  for (; x < 8; x++)
1646    t[7-x] = 0;
1647
1648  for (j = 5; j >= 0; j--)
1649    {
1650      for (i = n; i >= 1; i--)
1651        {
1652          /* B := AES_k^-1( (A ^ t) | R[i] ) */
1653          for (x = 0; x < 8; x++)
1654            b[x] = a[x] ^ t[x];
1655          memcpy (b+8, r+(i-1)*8, 8);
1656          c->cipher->decrypt (&c->context.c, b, b);
1657          /* t := t - 1  */
1658	  for (x = 7; x >= 0; x--)
1659	    {
1660	      t[x]--;
1661	      if (t[x] != 0xff)
1662		break;
1663	    }
1664          /* A := MSB_64(B) */
1665          memcpy (a, b, 8);
1666          /* R[i] := LSB_64(B) */
1667          memcpy (r+(i-1)*8, b+8, 8);
1668        }
1669   }
1670
1671  /* If an IV has been set we compare against this Alternative Initial
1672     Value; if it has not been set we compare against the standard IV.  */
1673  if (c->marks.iv)
1674    j = memcmp (a, c->u_iv.iv, 8);
1675  else
1676    {
1677      for (j=0, x=0; x < 8; x++)
1678        if (a[x] != 0xa6)
1679          {
1680            j=1;
1681            break;
1682          }
1683    }
1684  return j? GPG_ERR_CHECKSUM : 0;
1685}
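/* Example usage of the two RFC 3394 functions above (illustrative
   only; KEK and KEYDATA are hypothetical caller buffers): wrapping
   16 bytes of key material with a 128 bit KEK yields 24 bytes of
   output, i.e. the input plus one additional 64 bit block:

     gcry_cipher_hd_t hd;
     unsigned char wrapped[24];

     gcry_cipher_open (&hd, GCRY_CIPHER_AES128, GCRY_CIPHER_MODE_AESWRAP, 0);
     gcry_cipher_setkey (hd, kek, 16);
     gcry_cipher_encrypt (hd, wrapped, sizeof wrapped, keydata, 16);
     gcry_cipher_close (hd);

   Unwrapping uses gcry_cipher_decrypt the same way and fails with
   GPG_ERR_CHECKSUM if the integrity check on A does not match.  */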
1686
1687
1688/****************
1689 * Encrypt INBUF to OUTBUF with the mode selected at open.
1690 * inbuf and outbuf may overlap or be the same.
1691 * Depending on the mode some constraints apply to INBUFLEN.
1692 */
1693static gcry_err_code_t
1694cipher_encrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
1695		const byte *inbuf, unsigned int inbuflen)
1696{
1697  gcry_err_code_t rc;
1698
1699  switch (c->mode)
1700    {
1701    case GCRY_CIPHER_MODE_ECB:
1702      rc = do_ecb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1703      break;
1704
1705    case GCRY_CIPHER_MODE_CBC:
1706      rc = do_cbc_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1707      break;
1708
1709    case GCRY_CIPHER_MODE_CFB:
1710      rc = do_cfb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1711      break;
1712
1713    case GCRY_CIPHER_MODE_OFB:
1714      rc = do_ofb_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1715      break;
1716
1717    case GCRY_CIPHER_MODE_CTR:
1718      rc = do_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1719      break;
1720
1721    case GCRY_CIPHER_MODE_AESWRAP:
1722      rc = do_aeswrap_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1723      break;
1724
1725    case GCRY_CIPHER_MODE_STREAM:
1726      c->cipher->stencrypt (&c->context.c,
1727                            outbuf, (byte*)/*arggg*/inbuf, inbuflen);
1728      rc = 0;
1729      break;
1730
1731    case GCRY_CIPHER_MODE_NONE:
1732      if (fips_mode () || !_gcry_get_debug_flag (0))
1733        {
1734          fips_signal_error ("cipher mode NONE used");
1735          rc = GPG_ERR_INV_CIPHER_MODE;
1736        }
1737      else
1738        {
1739          if (inbuf != outbuf)
1740            memmove (outbuf, inbuf, inbuflen);
1741          rc = 0;
1742        }
1743      break;
1744
1745    default:
1746      log_fatal ("cipher_encrypt: invalid mode %d\n", c->mode );
1747      rc = GPG_ERR_INV_CIPHER_MODE;
1748      break;
1749    }
1750
1751  return rc;
1752}
1753
1754
1755/****************
1756 * Encrypt IN and write it to OUT.  If IN is NULL, in-place encryption has
1757 * been requested.
1758 */
1759gcry_error_t
1760gcry_cipher_encrypt (gcry_cipher_hd_t h, void *out, size_t outsize,
1761                     const void *in, size_t inlen)
1762{
1763  gcry_err_code_t err;
1764
1765  if (!in)  /* Caller requested in-place encryption.  */
1766    err = cipher_encrypt (h, out, outsize, out, outsize);
1767  else
1768    err = cipher_encrypt (h, out, outsize, in, inlen);
1769
1770  /* Failsafe: Make sure that the plaintext will never make it into
1771     OUT if the encryption returned an error.  */
1772  if (err && out)
1773    memset (out, 0x42, outsize);
1774
1775  return gcry_error (err);
1776}
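
/* A short sketch (not compiled) of the two calling conventions: a
   separate input buffer versus in-place operation.  The handle HD is
   assumed to be open with key and IV already set, and LEN is assumed
   to satisfy the constraints of the selected mode.  */
#if 0
static gcry_error_t
encrypt_in_place (gcry_cipher_hd_t hd, unsigned char *buf, size_t len)
{
  /* With IN == NULL the data in BUF is encrypted in place; with a
     separate input buffer the call would be
     gcry_cipher_encrypt (hd, out, outlen, in, inlen).  */
  return gcry_cipher_encrypt (hd, buf, len, NULL, 0);
}
#endif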
1777
1778
1779
1780/****************
1781 * Decrypt INBUF to OUTBUF with the mode selected at open.
1782 * inbuf and outbuf may overlap or be the same.
1783 * Depending on the mode some constraints apply to INBUFLEN.
1784 */
1785static gcry_err_code_t
1786cipher_decrypt (gcry_cipher_hd_t c, byte *outbuf, unsigned int outbuflen,
1787                const byte *inbuf, unsigned int inbuflen)
1788{
1789  gcry_err_code_t rc;
1790
1791  switch (c->mode)
1792    {
1793    case GCRY_CIPHER_MODE_ECB:
1794      rc = do_ecb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1795      break;
1796
1797    case GCRY_CIPHER_MODE_CBC:
1798      rc = do_cbc_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1799      break;
1800
1801    case GCRY_CIPHER_MODE_CFB:
1802      rc = do_cfb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1803      break;
1804
1805    case GCRY_CIPHER_MODE_OFB:
1806      rc = do_ofb_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1807      break;
1808
1809    case GCRY_CIPHER_MODE_CTR:
1810      rc = do_ctr_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1811      break;
1812
1813    case GCRY_CIPHER_MODE_AESWRAP:
1814      rc = do_aeswrap_decrypt (c, outbuf, outbuflen, inbuf, inbuflen);
1815      break;
1816
1817    case GCRY_CIPHER_MODE_STREAM:
1818      c->cipher->stdecrypt (&c->context.c,
1819                            outbuf, (byte*)/*arggg*/inbuf, inbuflen);
1820      rc = 0;
1821      break;
1822
1823    case GCRY_CIPHER_MODE_NONE:
1824      if (fips_mode () || !_gcry_get_debug_flag (0))
1825        {
1826          fips_signal_error ("cipher mode NONE used");
1827          rc = GPG_ERR_INV_CIPHER_MODE;
1828        }
1829      else
1830        {
1831          if (inbuf != outbuf)
1832            memmove (outbuf, inbuf, inbuflen);
1833          rc = 0;
1834        }
1835      break;
1836
1837    default:
1838      log_fatal ("cipher_decrypt: invalid mode %d\n", c->mode );
1839      rc = GPG_ERR_INV_CIPHER_MODE;
1840      break;
1841    }
1842
1843  return rc;
1844}
1845
1846
1847gcry_error_t
1848gcry_cipher_decrypt (gcry_cipher_hd_t h, void *out, size_t outsize,
1849		     const void *in, size_t inlen)
1850{
1851  gcry_err_code_t err;
1852
1853  if (!in) /* Caller requested in-place decryption. */
1854    err = cipher_decrypt (h, out, outsize, out, outsize);
1855  else
1856    err = cipher_decrypt (h, out, outsize, in, inlen);
1857
1858  return gcry_error (err);
1859}
1860
1861
1862
1863/****************
1864 * Used for PGP's somewhat strange CFB mode. Only works if
1865 * the GCRY_CIPHER_ENABLE_SYNC flag has been set.
1866 */
1867static void
1868cipher_sync (gcry_cipher_hd_t c)
1869{
1870  if ((c->flags & GCRY_CIPHER_ENABLE_SYNC) && c->unused)
1871    {
1872      memmove (c->u_iv.iv + c->unused,
1873               c->u_iv.iv, c->cipher->blocksize - c->unused);
1874      memcpy (c->u_iv.iv,
1875              c->lastiv + c->cipher->blocksize - c->unused, c->unused);
1876      c->unused = 0;
1877    }
1878}
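
/* A usage sketch (not compiled) for the OpenPGP sync point: it only
   has an effect if the handle was opened with GCRY_CIPHER_ENABLE_SYNC.
   gcry_cipher_sync is the public wrapper around the GCRYCTL_CFB_SYNC
   control command handled by gcry_cipher_ctl below.  */
#if 0
static gcry_error_t
openpgp_cfb_example (const void *key, size_t keylen)
{
  gcry_cipher_hd_t hd;
  gcry_error_t err;

  err = gcry_cipher_open (&hd, GCRY_CIPHER_CAST5, GCRY_CIPHER_MODE_CFB,
                          GCRY_CIPHER_ENABLE_SYNC);
  if (err)
    return err;
  err = gcry_cipher_setkey (hd, key, keylen);
  /* ... process the random prefix, then re-align the CFB stream:  */
  if (!err)
    err = gcry_cipher_sync (hd);
  gcry_cipher_close (hd);
  return err;
}
#endif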
1879
1880
1881gcry_error_t
1882_gcry_cipher_setkey (gcry_cipher_hd_t hd, const void *key, size_t keylen)
1883{
1884  return cipher_setkey (hd, (void*)key, keylen);
1885}
1886
1887
1888gcry_error_t
1889_gcry_cipher_setiv (gcry_cipher_hd_t hd, const void *iv, size_t ivlen)
1890{
1891  cipher_setiv (hd, iv, ivlen);
1892  return 0;
1893}
1894
1895/* Set counter for CTR mode.  (CTR,CTRLEN) must denote a buffer of
1896   block size length, or (NULL,0) to set the CTR to the all-zero
1897   block. */
1898gpg_error_t
1899_gcry_cipher_setctr (gcry_cipher_hd_t hd, const void *ctr, size_t ctrlen)
1900{
1901  if (ctr && ctrlen == hd->cipher->blocksize)
1902    {
1903      memcpy (hd->u_ctr.ctr, ctr, hd->cipher->blocksize);
1904      hd->unused = 0;
1905    }
1906  else if (!ctr || !ctrlen)
1907    {
1908      memset (hd->u_ctr.ctr, 0, hd->cipher->blocksize);
1909      hd->unused = 0;
1910    }
1911  else
1912    return gpg_error (GPG_ERR_INV_ARG);
1913  return 0;
1914}
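
/* A small sketch (not compiled) of counter setup through the public
   gcry_cipher_setctr entry point.  The 16-byte counter length matches
   the AES block size; passing NULL/0 resets the counter to the
   all-zero block as described above.  */
#if 0
static gcry_error_t
ctr_example (gcry_cipher_hd_t hd, const unsigned char ctr[16])
{
  gcry_error_t err;

  err = gcry_cipher_setctr (hd, ctr, 16);
  if (!err)
    err = gcry_cipher_setctr (hd, NULL, 0);
  return err;
}
#endif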
1915
1916
1917gcry_error_t
1918gcry_cipher_ctl( gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen)
1919{
1920  gcry_err_code_t rc = GPG_ERR_NO_ERROR;
1921
1922  switch (cmd)
1923    {
1924    case GCRYCTL_SET_KEY:  /* Deprecated; use gcry_cipher_setkey.  */
1925      rc = cipher_setkey( h, buffer, buflen );
1926      break;
1927
1928    case GCRYCTL_SET_IV:   /* Deprecated; use gcry_cipher_setiv.  */
1929      cipher_setiv( h, buffer, buflen );
1930      break;
1931
1932    case GCRYCTL_RESET:
1933      cipher_reset (h);
1934      break;
1935
1936    case GCRYCTL_CFB_SYNC:
1937      cipher_sync( h );
1938      break;
1939
1940    case GCRYCTL_SET_CBC_CTS:
1941      if (buflen)
1942	if (h->flags & GCRY_CIPHER_CBC_MAC)
1943	  rc = GPG_ERR_INV_FLAG;
1944	else
1945	  h->flags |= GCRY_CIPHER_CBC_CTS;
1946      else
1947	h->flags &= ~GCRY_CIPHER_CBC_CTS;
1948      break;
1949
1950    case GCRYCTL_SET_CBC_MAC:
1951      if (buflen)
1952	if (h->flags & GCRY_CIPHER_CBC_CTS)
1953	  rc = GPG_ERR_INV_FLAG;
1954	else
1955	  h->flags |= GCRY_CIPHER_CBC_MAC;
1956      else
1957	h->flags &= ~GCRY_CIPHER_CBC_MAC;
1958      break;
1959
1960    case GCRYCTL_DISABLE_ALGO:
1961      /* This command expects NULL for H and BUFFER to point to an
1962         integer with the algo number.  */
1963      if( h || !buffer || buflen != sizeof(int) )
1964	return gcry_error (GPG_ERR_CIPHER_ALGO);
1965      disable_cipher_algo( *(int*)buffer );
1966      break;
1967
1968    case GCRYCTL_SET_CTR: /* Deprecated; use gcry_cipher_setctr.  */
1969      rc = gpg_err_code (_gcry_cipher_setctr (h, buffer, buflen));
1970      break;
1971
1972    case 61:  /* Disable weak key detection (private).  */
1973      if (h->extraspec->set_extra_info)
1974        rc = h->extraspec->set_extra_info
1975          (&h->context.c, CIPHER_INFO_NO_WEAK_KEY, NULL, 0);
1976      else
1977        rc = GPG_ERR_NOT_SUPPORTED;
1978      break;
1979
1980    case 62: /* Return current input vector (private).  */
1981      /* This is the input block as used in CFB and OFB mode which has
1982         initially been set as IV.  The returned format is:
1983           1 byte  Actual length of the block in bytes.
1984           n byte  The block.
1985         If the provided buffer is too short, an error is returned. */
1986      if (buflen < (1 + h->cipher->blocksize))
1987        rc = GPG_ERR_TOO_SHORT;
1988      else
1989        {
1990          unsigned char *ivp;
1991          unsigned char *dst = buffer;
1992          int n = h->unused;
1993
1994          if (!n)
1995            n = h->cipher->blocksize;
1996          gcry_assert (n <= h->cipher->blocksize);
1997          *dst++ = n;
1998          ivp = h->u_iv.iv + h->cipher->blocksize - n;
1999          while (n--)
2000            *dst++ = *ivp++;
2001        }
2002      break;
2003
2004    default:
2005      rc = GPG_ERR_INV_OP;
2006    }
2007
2008  return gcry_error (rc);
2009}
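
/* A sketch (not compiled) of toggling ciphertext stealing with the
   control interface above: a non-zero BUFLEN enables the flag, zero
   clears it, and the request is rejected while CBC-MAC is active.  */
#if 0
static gcry_error_t
cts_example (gcry_cipher_hd_t hd, int enable)
{
  return gcry_cipher_ctl (hd, GCRYCTL_SET_CBC_CTS, NULL, enable ? 1 : 0);
}
#endif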
2010
2011
2012/* Return information about the cipher handle H.  CMD is the kind of
2013   information requested.  BUFFER and NBYTES are reserved for now.
2014
2015   No values for CMD are defined yet.
2016
2017   The function always returns GPG_ERR_INV_OP.
2018
2019 */
2020gcry_error_t
2021gcry_cipher_info (gcry_cipher_hd_t h, int cmd, void *buffer, size_t *nbytes)
2022{
2023  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2024
2025  (void)h;
2026  (void)buffer;
2027  (void)nbytes;
2028
2029  switch (cmd)
2030    {
2031    default:
2032      err = GPG_ERR_INV_OP;
2033    }
2034
2035  return gcry_error (err);
2036}
2037
2038/* Return information about the given cipher algorithm ALGO.
2039
2040   WHAT selects the kind of information returned:
2041
2042    GCRYCTL_GET_KEYLEN:
2043  	Return the length of the key.  If the algorithm ALGO
2044  	supports multiple key lengths, the maximum supported key length
2045  	is returned.  The key length is returned as number of octets.
2046  	BUFFER and NBYTES must be zero.
2047
2048    GCRYCTL_GET_BLKLEN:
2049  	Return the blocklength of the algorithm ALGO counted in octets.
2050  	BUFFER and NBYTES must be zero.
2051
2052    GCRYCTL_TEST_ALGO:
2053  	Returns 0 if the specified algorithm ALGO is available for use.
2054  	BUFFER and NBYTES must be zero.
2055
2056   Note: Because this function is in most cases used to return an
2057   integer value, we make it easy for the caller to just look at the
2058   return value.  The caller will in all cases consult that value and
2059   thereby detect whether an error occurred (e.g. while checking the
2060   block size).
2061 */
2062gcry_error_t
2063gcry_cipher_algo_info (int algo, int what, void *buffer, size_t *nbytes)
2064{
2065  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2066  unsigned int ui;
2067
2068  switch (what)
2069    {
2070    case GCRYCTL_GET_KEYLEN:
2071      if (buffer || (! nbytes))
2072	err = GPG_ERR_CIPHER_ALGO;
2073      else
2074	{
2075	  ui = cipher_get_keylen (algo);
2076	  if ((ui > 0) && (ui <= 512))
2077	    *nbytes = (size_t) ui / 8;
2078	  else
2079	    /* The only reason for an error is an invalid algo.  */
2080	    err = GPG_ERR_CIPHER_ALGO;
2081	}
2082      break;
2083
2084    case GCRYCTL_GET_BLKLEN:
2085      if (buffer || (! nbytes))
2086	err = GPG_ERR_CIPHER_ALGO;
2087      else
2088	{
2089	  ui = cipher_get_blocksize (algo);
2090	  if ((ui > 0) && (ui < 10000))
2091	    *nbytes = ui;
2092	  else
2093	    /* The only reason is an invalid algo or a strange
2094	       blocksize.  */
2095	    err = GPG_ERR_CIPHER_ALGO;
2096	}
2097      break;
2098
2099    case GCRYCTL_TEST_ALGO:
2100      if (buffer || nbytes)
2101	err = GPG_ERR_INV_ARG;
2102      else
2103	err = check_cipher_algo (algo);
2104      break;
2105
2106    default:
2107	err = GPG_ERR_INV_OP;
2108    }
2109
2110  return gcry_error (err);
2111}
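
/* A sketch (not compiled) of the query conventions documented above:
   GCRYCTL_TEST_ALGO takes neither BUFFER nor NBYTES, while
   GCRYCTL_GET_KEYLEN returns the key length in octets through NBYTES.  */
#if 0
static int
algo_usable_p (int algo)
{
  size_t keylen;

  if (gcry_cipher_algo_info (algo, GCRYCTL_TEST_ALGO, NULL, NULL))
    return 0;   /* Algorithm not available or disabled.  */
  if (gcry_cipher_algo_info (algo, GCRYCTL_GET_KEYLEN, NULL, &keylen))
    return 0;
  return keylen > 0;
}
#endif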
2112
2113
2114/* This function returns the length of the key for algorithm ALGO.  If the
2115   algorithm supports multiple key lengths, the maximum supported key
2116   length is returned.  On error 0 is returned.  The key length is
2117   returned as number of octets.
2118
2119   This is a convenience function which should be preferred over
2120   gcry_cipher_algo_info because it allows for proper type
2121   checking.  */
2122size_t
2123gcry_cipher_get_algo_keylen (int algo)
2124{
2125  size_t n;
2126
2127  if (gcry_cipher_algo_info (algo, GCRYCTL_GET_KEYLEN, NULL, &n))
2128    n = 0;
2129  return n;
2130}
2131
2132/* This function returns the blocklength of the algorithm ALGO
2133   counted in octets.  On error 0 is returned.
2134
2135   This is a convenience function which should be preferred over
2136   gcry_cipher_algo_info because it allows for proper type
2137   checking.  */
2138size_t
2139gcry_cipher_get_algo_blklen (int algo)
2140{
2141  size_t n;
2142
2143  if (gcry_cipher_algo_info (algo, GCRYCTL_GET_BLKLEN, NULL, &n))
2144    n = 0;
2145  return n;
2146}
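
/* A brief sketch (not compiled) of the preferred, type-checked way to
   size key and block buffers with the two helpers above.  The AES-256
   values noted in the comments follow from the algorithm definition.  */
#if 0
static void
buffer_sizes_example (void)
{
  size_t keylen = gcry_cipher_get_algo_keylen (GCRY_CIPHER_AES256); /* 32 */
  size_t blklen = gcry_cipher_get_algo_blklen (GCRY_CIPHER_AES256); /* 16 */

  (void)keylen;
  (void)blklen;
}
#endif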
2147
2148/* Explicitly initialize this module.  */
2149gcry_err_code_t
2150_gcry_cipher_init (void)
2151{
2152  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2153
2154  REGISTER_DEFAULT_CIPHERS;
2155
2156  return err;
2157}
2158
2159/* Get a list consisting of the IDs of the loaded cipher modules.  If
2160   LIST is zero, write the number of loaded cipher modules to
2161   LIST_LENGTH and return.  If LIST is non-zero, the first
2162   *LIST_LENGTH algorithm IDs are stored in LIST, which must be of
2163   sufficient size.  In case there are fewer cipher modules than
2164   *LIST_LENGTH, *LIST_LENGTH is updated to the correct number.  */
2165gcry_error_t
2166gcry_cipher_list (int *list, int *list_length)
2167{
2168  gcry_err_code_t err = GPG_ERR_NO_ERROR;
2169
2170  ath_mutex_lock (&ciphers_registered_lock);
2171  err = _gcry_module_list (ciphers_registered, list, list_length);
2172  ath_mutex_unlock (&ciphers_registered_lock);
2173
2174  return err;
2175}
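
/* A sketch (not compiled) of the usual two-call pattern for the list
   function above: query the number of modules first, then fetch the
   algorithm IDs into a caller-allocated array.  */
#if 0
static void
list_ciphers_example (void)
{
  int count = 0;
  int *ids;

  gcry_cipher_list (NULL, &count);
  ids = gcry_malloc (count * sizeof *ids);
  if (ids && !gcry_cipher_list (ids, &count))
    {
      /* IDS[0..COUNT-1] now hold the IDs of the loaded ciphers.  */
    }
  gcry_free (ids);
}
#endif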
2176
2177
2178/* Run the selftests for cipher algorithm ALGO with optional reporting
2179   function REPORT.  */
2180gpg_error_t
2181_gcry_cipher_selftest (int algo, int extended, selftest_report_func_t report)
2182{
2183  gcry_module_t module = NULL;
2184  cipher_extra_spec_t *extraspec = NULL;
2185  gcry_err_code_t ec = 0;
2186
2187  REGISTER_DEFAULT_CIPHERS;
2188
2189  ath_mutex_lock (&ciphers_registered_lock);
2190  module = _gcry_module_lookup_id (ciphers_registered, algo);
2191  if (module && !(module->flags & FLAG_MODULE_DISABLED))
2192    extraspec = module->extraspec;
2193  ath_mutex_unlock (&ciphers_registered_lock);
2194  if (extraspec && extraspec->selftest)
2195    ec = extraspec->selftest (algo, extended, report);
2196  else
2197    {
2198      ec = GPG_ERR_CIPHER_ALGO;
2199      if (report)
2200        report ("cipher", algo, "module",
2201                module && !(module->flags & FLAG_MODULE_DISABLED)?
2202                "no selftest available" :
2203                module? "algorithm disabled" : "algorithm not found");
2204    }
2205
2206  if (module)
2207    {
2208      ath_mutex_lock (&ciphers_registered_lock);
2209      _gcry_module_release (module);
2210      ath_mutex_unlock (&ciphers_registered_lock);
2211    }
2212  return gpg_error (ec);
2213}
2214