/* cipher-ccm.c - CTR mode with CBC-MAC mode implementation
 * Copyright (C) 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <config.h>
#include <string.h>

#include "g10lib.h"
#include "cipher.h"
#include "bufhelp.h"
#include "./cipher-internal.h"

#define set_burn(burn, nburn) do { \
  unsigned int __nburn = (nburn); \
  (burn) = (burn) > __nburn ? (burn) : __nburn; } while (0)

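
/* Update the running CBC-MAC over INLEN bytes at INBUF.  The MAC value is
   kept in c->u_iv.iv; partial blocks are buffered in c->u_mode.ccm.macbuf
   until a full 16-byte block is available.  When DO_PADDING is set, a
   trailing partial block is zero-padded and processed as well.  Returns
   the stack burn depth of the underlying block cipher calls.  */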
static unsigned int
do_cbc_mac (gcry_cipher_hd_t c, const unsigned char *inbuf, size_t inlen,
            int do_padding)
{
  const unsigned int blocksize = 16;
  gcry_cipher_encrypt_t enc_fn = c->spec->encrypt;
  unsigned char tmp[blocksize];
  unsigned int burn = 0;
  unsigned int unused = c->u_mode.ccm.mac_unused;
  size_t nblocks;

  if (inlen == 0 && (unused == 0 || !do_padding))
    return 0;

  do
    {
      if (inlen + unused < blocksize || unused > 0)
        {
          /* Fill the partial-block buffer from the input.  */
          for (; inlen && unused < blocksize; inlen--)
            c->u_mode.ccm.macbuf[unused++] = *inbuf++;
        }
      if (!inlen)
        {
          if (!do_padding)
            break;

          /* Zero-pad the final partial block.  */
          while (unused < blocksize)
            c->u_mode.ccm.macbuf[unused++] = 0;
        }

      if (unused > 0)
        {
          /* Process one block from macbuf. */
          cipher_block_xor(c->u_iv.iv, c->u_iv.iv, c->u_mode.ccm.macbuf,
                           blocksize);
          set_burn (burn, enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv ));

          unused = 0;
        }

      if (c->bulk.cbc_enc)
        {
          /* Use the bulk CBC path for the remaining full blocks; the running
             MAC stays in u_iv.iv, the ciphertext written to TMP is wiped.  */
          nblocks = inlen / blocksize;
          c->bulk.cbc_enc (&c->context.c, c->u_iv.iv, tmp, inbuf, nblocks, 1);
          inbuf += nblocks * blocksize;
          inlen -= nblocks * blocksize;

          wipememory (tmp, sizeof(tmp));
        }
      else
        {
          /* Generic block-by-block CBC-MAC.  */
          while (inlen >= blocksize)
            {
              cipher_block_xor(c->u_iv.iv, c->u_iv.iv, inbuf, blocksize);

              set_burn (burn, enc_fn ( &c->context.c, c->u_iv.iv, c->u_iv.iv ));

              inlen -= blocksize;
              inbuf += blocksize;
            }
        }
    }
  while (inlen > 0);

  c->u_mode.ccm.mac_unused = unused;

  if (burn)
    burn += 4 * sizeof(void *);

  return burn;
}

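
/* Set the CCM nonce.  NONCELEN must be 7..13 bytes so that the size of the
   length field, L = 15 - noncelen, stays within 2..8 as required by CCM
   (NIST SP 800-38C / RFC 3610).  This resets the per-message state and
   prepares the counter block and the B_0 flags/nonce prefix; the flags
   octet is completed later in set_lengths.  */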
gcry_err_code_t
_gcry_cipher_ccm_set_nonce (gcry_cipher_hd_t c, const unsigned char *nonce,
                            size_t noncelen)
{
  unsigned int marks_key;
  size_t L = 15 - noncelen;
  size_t L_ = L - 1;

  if (!nonce)
    return GPG_ERR_INV_ARG;
  /* Length field must be 2, 3, ..., or 8. */
  if (L < 2 || L > 8)
    return GPG_ERR_INV_LENGTH;

  /* Reset the mode state, but keep the key mark.  */
  marks_key = c->marks.key;
  memset (&c->u_mode, 0, sizeof(c->u_mode));
  memset (&c->marks, 0, sizeof(c->marks));
  memset (&c->u_iv, 0, sizeof(c->u_iv));
  memset (&c->u_ctr, 0, sizeof(c->u_ctr));
  memset (c->lastiv, 0, sizeof(c->lastiv));
  c->marks.key = marks_key;

  /* Setup CTR.  */
  c->u_ctr.ctr[0] = L_;
  memcpy (&c->u_ctr.ctr[1], nonce, noncelen);
  memset (&c->u_ctr.ctr[1 + noncelen], 0, L);

  /* Setup IV (the running CBC-MAC block B_0).  */
  c->u_iv.iv[0] = L_;
  memcpy (&c->u_iv.iv[1], nonce, noncelen);
  /* Add (8 * M_ + 64 * flags) to iv[0] and set iv[noncelen + 1 ... 15] later
     in set_lengths.  */
  memset (&c->u_iv.iv[1 + noncelen], 0, L);

  c->u_mode.ccm.nonce = 1;

  return GPG_ERR_NO_ERROR;
}

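
/* Commit the data, AAD and tag lengths.  CCM needs all of them in advance:
   they are encoded into the first MAC block B_0 and, for a non-empty AAD,
   into the length prefix that starts the AAD blocks.  This also computes
   S_0 = E(K, CTR_0), which later masks the MAC to form the tag, and then
   advances the counter for the first data block.  */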
gcry_err_code_t
_gcry_cipher_ccm_set_lengths (gcry_cipher_hd_t c, u64 encryptlen, u64 aadlen,
                              u64 taglen)
{
  unsigned int burn = 0;
  unsigned char b0[16];
  size_t noncelen = 15 - (c->u_iv.iv[0] + 1);
  u64 M = taglen;
  u64 M_;
  int i;

  M_ = (M - 2) / 2;

  /* Authentication field must be 4, 6, 8, 10, 12, 14 or 16. */
  if ((M_ * 2 + 2) != M || M < 4 || M > 16)
    return GPG_ERR_INV_LENGTH;
  if (!c->u_mode.ccm.nonce || c->marks.tag)
    return GPG_ERR_INV_STATE;
  if (c->u_mode.ccm.lengths)
    return GPG_ERR_INV_STATE;

  c->u_mode.ccm.authlen = taglen;
  c->u_mode.ccm.encryptlen = encryptlen;
  c->u_mode.ccm.aadlen = aadlen;

  /* Complete IV setup: flags octet plus big-endian message length.  */
  c->u_iv.iv[0] += (aadlen > 0) * 64 + M_ * 8;
  for (i = 16 - 1; i >= 1 + noncelen; i--)
    {
      c->u_iv.iv[i] = encryptlen & 0xff;
      encryptlen >>= 8;
    }

  memcpy (b0, c->u_iv.iv, 16);
  memset (c->u_iv.iv, 0, 16);

  set_burn (burn, do_cbc_mac (c, b0, 16, 0));

  /* MAC the AAD length prefix, encoded as in RFC 3610.  */
  if (aadlen == 0)
    {
      /* Do nothing.  */
    }
  else if (aadlen > 0 && aadlen <= (unsigned int)0xfeff)
    {
      b0[0] = (aadlen >> 8) & 0xff;
      b0[1] = aadlen & 0xff;
      set_burn (burn, do_cbc_mac (c, b0, 2, 0));
    }
  else if (aadlen > 0xfeff && aadlen <= (unsigned int)0xffffffff)
    {
      b0[0] = 0xff;
      b0[1] = 0xfe;
      buf_put_be32(&b0[2], aadlen);
      set_burn (burn, do_cbc_mac (c, b0, 6, 0));
    }
  else if (aadlen > (unsigned int)0xffffffff)
    {
      b0[0] = 0xff;
      b0[1] = 0xff;
      buf_put_be64(&b0[2], aadlen);
      set_burn (burn, do_cbc_mac (c, b0, 10, 0));
    }

  /* Generate S_0 and increase counter.  */
  set_burn (burn, c->spec->encrypt ( &c->context.c, c->u_mode.ccm.s0,
                                     c->u_ctr.ctr ));
  c->u_ctr.ctr[15]++;

  if (burn)
    _gcry_burn_stack (burn + sizeof(void *) * 5);

  c->u_mode.ccm.lengths = 1;

  return GPG_ERR_NO_ERROR;
}

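
/* Feed AAD bytes into the CBC-MAC.  May be called multiple times; the total
   must not exceed the AADLEN given to set_lengths.  Once the remaining AAD
   length reaches zero, the last AAD block is zero-padded and processed.  */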
gcry_err_code_t
_gcry_cipher_ccm_authenticate (gcry_cipher_hd_t c, const unsigned char *abuf,
                               size_t abuflen)
{
  unsigned int burn;

  if (abuflen > 0 && !abuf)
    return GPG_ERR_INV_ARG;
  if (!c->u_mode.ccm.nonce || !c->u_mode.ccm.lengths || c->marks.tag)
    return GPG_ERR_INV_STATE;
  if (abuflen > c->u_mode.ccm.aadlen)
    return GPG_ERR_INV_LENGTH;

  c->u_mode.ccm.aadlen -= abuflen;
  burn = do_cbc_mac (c, abuf, abuflen, c->u_mode.ccm.aadlen == 0);

  if (burn)
    _gcry_burn_stack (burn + sizeof(void *) * 5);

  return GPG_ERR_NO_ERROR;
}

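
/* Common tag handling.  With CHECK set to zero the computed tag is copied
   to OUTBUF; otherwise OUTBUF holds the expected tag and is compared in
   constant time.  The tag is the final CBC-MAC value XORed with S_0,
   truncated to the tag length fixed in set_lengths.  */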
static gcry_err_code_t
_gcry_cipher_ccm_tag (gcry_cipher_hd_t c, unsigned char *outbuf,
                      size_t outbuflen, int check)
{
  unsigned int burn;

  if (!outbuf || outbuflen == 0)
    return GPG_ERR_INV_ARG;
  /* Tag length must be same as initial authlen. */
  if (c->u_mode.ccm.authlen != outbuflen)
    return GPG_ERR_INV_LENGTH;
  if (!c->u_mode.ccm.nonce || !c->u_mode.ccm.lengths || c->u_mode.ccm.aadlen > 0)
    return GPG_ERR_INV_STATE;
  /* Initial encrypt length must match with length of actual data processed. */
  if (c->u_mode.ccm.encryptlen > 0)
    return GPG_ERR_UNFINISHED;

  if (!c->marks.tag)
    {
      burn = do_cbc_mac (c, NULL, 0, 1); /* Perform final padding. */

      /* Add S_0.  */
      cipher_block_xor (c->u_iv.iv, c->u_iv.iv, c->u_mode.ccm.s0, 16);

      wipememory (c->u_ctr.ctr, 16);
      wipememory (c->u_mode.ccm.s0, 16);
      wipememory (c->u_mode.ccm.macbuf, 16);

      if (burn)
        _gcry_burn_stack (burn + sizeof(void *) * 5);

      c->marks.tag = 1;
    }

  if (!check)
    {
      memcpy (outbuf, c->u_iv.iv, outbuflen);
      return GPG_ERR_NO_ERROR;
    }
  else
    {
      return buf_eq_const(outbuf, c->u_iv.iv, outbuflen) ?
             GPG_ERR_NO_ERROR : GPG_ERR_CHECKSUM;
    }
}

gcry_err_code_t
_gcry_cipher_ccm_get_tag (gcry_cipher_hd_t c, unsigned char *outtag,
                          size_t taglen)
{
  return _gcry_cipher_ccm_tag (c, outtag, taglen, 0);
}

gcry_err_code_t
_gcry_cipher_ccm_check_tag (gcry_cipher_hd_t c, const unsigned char *intag,
                            size_t taglen)
{
  return _gcry_cipher_ccm_tag (c, (unsigned char *)intag, taglen, 1);
}

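
/* CCM encryption: the CBC-MAC is updated with the plaintext first, then the
   same bytes are encrypted in CTR mode.  All AAD must have been processed
   before any plaintext is passed in.  */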
gcry_err_code_t
_gcry_cipher_ccm_encrypt (gcry_cipher_hd_t c, unsigned char *outbuf,
                          size_t outbuflen, const unsigned char *inbuf,
                          size_t inbuflen)
{
  unsigned int burn;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;
  if (!c->u_mode.ccm.nonce || c->marks.tag || !c->u_mode.ccm.lengths ||
      c->u_mode.ccm.aadlen > 0)
    return GPG_ERR_INV_STATE;
  if (inbuflen > c->u_mode.ccm.encryptlen)
    return GPG_ERR_INV_LENGTH;

  c->u_mode.ccm.encryptlen -= inbuflen;
  burn = do_cbc_mac (c, inbuf, inbuflen, 0);
  if (burn)
    _gcry_burn_stack (burn + sizeof(void *) * 5);

  return _gcry_cipher_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
}

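
/* CCM decryption: the ciphertext is first decrypted in CTR mode, then the
   CBC-MAC is updated with the recovered plaintext, mirroring encryption.  */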
gcry_err_code_t
_gcry_cipher_ccm_decrypt (gcry_cipher_hd_t c, unsigned char *outbuf,
                          size_t outbuflen, const unsigned char *inbuf,
                          size_t inbuflen)
{
  gcry_err_code_t err;
  unsigned int burn;

  if (outbuflen < inbuflen)
    return GPG_ERR_BUFFER_TOO_SHORT;
  if (!c->u_mode.ccm.nonce || c->marks.tag || !c->u_mode.ccm.lengths ||
      c->u_mode.ccm.aadlen > 0)
    return GPG_ERR_INV_STATE;
  if (inbuflen > c->u_mode.ccm.encryptlen)
    return GPG_ERR_INV_LENGTH;

  err = _gcry_cipher_ctr_encrypt (c, outbuf, outbuflen, inbuf, inbuflen);
  if (err)
    return err;

  c->u_mode.ccm.encryptlen -= inbuflen;
  burn = do_cbc_mac (c, outbuf, inbuflen, 0);
  if (burn)
    _gcry_burn_stack (burn + sizeof(void *) * 5);

  return err;
}
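
/* Illustrative sketch of how a caller drives this mode through the public
   libgcrypt API; error handling is omitted and the key, nonce and tag sizes
   are only examples.  CCM needs the data, AAD and tag lengths up front via
   GCRYCTL_SET_CCM_LENGTHS (three u64 values) before authenticate/encrypt,
   and the nonce passed to setiv must be 7..13 bytes long:

     gcry_cipher_hd_t hd;
     uint64_t lengths[3] = { msglen, aadlen, 16 };

     gcry_cipher_open (&hd, GCRY_CIPHER_AES128, GCRY_CIPHER_MODE_CCM, 0);
     gcry_cipher_setkey (hd, key, 16);
     gcry_cipher_setiv (hd, nonce, 12);
     gcry_cipher_ctl (hd, GCRYCTL_SET_CCM_LENGTHS, lengths, sizeof(lengths));
     gcry_cipher_authenticate (hd, aad, aadlen);
     gcry_cipher_encrypt (hd, ctext, msglen, msg, msglen);
     gcry_cipher_gettag (hd, tag, 16);
     gcry_cipher_close (hd);
 */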