/* mpihelp-mul.c  -  MPI helper functions
 * Copyright (C) 1994, 1996, 1998, 1999 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include "mpi-internal.h"
#include "longlong.h"
#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
    do {						\
	if( (size) < KARATSUBA_THRESHOLD )		\
	    mul_n_basecase (prodp, up, vp, size);	\
	else						\
	    mul_n (prodp, up, vp, size, tspace);	\
    } while(0)
#define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
    do {						\
	if( (size) < KARATSUBA_THRESHOLD )		\
	    mpih_sqr_n_basecase (prodp, up, size);	\
	else						\
	    mpih_sqr_n (prodp, up, size, tspace);	\
    } while(0)
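
/* Both macros dispatch on KARATSUBA_THRESHOLD: operands with fewer
 * limbs than the threshold are handled by the quadratic schoolbook
 * code, larger ones by one more level of Karatsuba splitting.  (The
 * threshold itself is expected to come from mpi-internal.h.)
 */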

/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
 * both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
 * always stored.  Return the most significant limb.
 *
 * Argument constraints:
 * 1. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 *
 * Handle simple cases with traditional multiplication.
 *
 * This is the most critical code of multiplication.  All multiplies rely
 * on this, both small and huge.  Small ones arrive here immediately.  Huge
 * ones arrive here as this is the base case for Karatsuba's recursive
 * algorithm below.
 */
static mpi_limb_t
mul_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up,
		mpi_ptr_t vp, mpi_size_t size)
{
    mpi_size_t i;
    mpi_limb_t cy;
    mpi_limb_t v_limb;

    /* Multiply by the first limb in V separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing. */
    v_limb = vp[0];
    if( v_limb <= 1 ) {
	if( v_limb == 1 )
	    MPN_COPY( prodp, up, size );
	else
	    MPN_ZERO( prodp, size );
	cy = 0;
    }
    else
	cy = mpihelp_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy;
    prodp++;

    /* For each iteration in the outer loop, multiply one limb from
     * U with one limb from V, and add it to PROD. */
    for( i = 1; i < size; i++ ) {
	v_limb = vp[i];
	if( v_limb <= 1 ) {
	    cy = 0;
	    if( v_limb == 1 )
		cy = mpihelp_add_n(prodp, prodp, up, size);
	}
	else
	    cy = mpihelp_addmul_1(prodp, up, size, v_limb);

	prodp[size] = cy;
	prodp++;
    }

    return cy;
}
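
/* For illustration only, a minimal sketch of the same row-by-row
 * schoolbook accumulation with plain 32-bit limbs and a 64-bit
 * intermediate instead of the mpihelp primitives.  It keeps the zeroing
 * loop that the code above optimizes away; the function, its types, and
 * its names are hypothetical and not part of this library.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint32_t
demo_mul_basecase( uint32_t *prodp, const uint32_t *up,
		   const uint32_t *vp, size_t size )	/* assumes size > 0 */
{
    uint64_t carry;
    size_t i, j;

    for( j = 0; j < 2 * size; j++ )	/* zero the destination once */
	prodp[j] = 0;

    for( i = 0; i < size; i++ ) {	/* one row per limb of V */
	carry = 0;
	for( j = 0; j < size; j++ ) {
	    carry += (uint64_t)up[j] * vp[i] + prodp[i + j];
	    prodp[i + j] = (uint32_t)carry;	/* low 32 bits of column sum */
	    carry >>= 32;			/* carry into the next column */
	}
	prodp[i + size] = (uint32_t)carry;
    }
    return prodp[2 * size - 1];		/* most significant limb */
}
#endif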

static void
mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
       mpi_size_t size, mpi_ptr_t tspace )
{
    if( size & 1 ) {
	/* The size is odd, and the code below doesn't handle that.
	 * Multiply the least significant (size - 1) limbs with a recursive
	 * call, and handle the most significant limb of S1 and S2
	 * separately.
	 * A slightly faster way to do this would be to make the Karatsuba
	 * code below behave as if the size were even, and let it check for
	 * odd size in the end.  I.e., in essence move this code to the end.
	 * Doing so would save us a recursive call, and potentially make the
	 * stack grow a lot less.
	 */
	mpi_size_t esize = size - 1;	/* even size */
	mpi_limb_t cy_limb;

	MPN_MUL_N_RECURSE( prodp, up, vp, esize, tspace );
	cy_limb = mpihelp_addmul_1( prodp + esize, up, esize, vp[esize] );
	prodp[esize + esize] = cy_limb;
	cy_limb = mpihelp_addmul_1( prodp + esize, vp, size, up[esize] );
	prodp[esize + size] = cy_limb;
    }
    else {
	/* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
	 *
	 * Split U in two pieces, U1 and U0, such that
	 * U = U0 + U1*(B**n),
	 * and V in V1 and V0, such that
	 * V = V0 + V1*(B**n).
	 *
	 * UV is then computed recursively using the identity
	 *
	 * UV = (B**(2n) + B**n)*U1*V1 + B**n*(U1-U0)*(V0-V1)
	 *	+ (B**n + 1)*U0*V0
	 *
	 * where B = 2**BITS_PER_MP_LIMB.
	 */
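	/* A quick sanity check of the identity with B = 10 and n = 1:
	 * U = 47 (U1 = 4, U0 = 7) and V = 31 (V1 = 3, V0 = 1).
	 *   (B**2 + B)*U1*V1  = 110 * 12	 = 1320
	 *   B*(U1-U0)*(V0-V1) = 10 * (-3)*(-2)  =   60
	 *   (B + 1)*U0*V0     = 11 * 7		 =   77
	 * and 1320 + 60 + 77 = 1457 = 47 * 31.  Only three half-size
	 * multiplications are needed instead of four.
	 */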
	mpi_size_t hsize = size >> 1;
	mpi_limb_t cy;
	int negflg;

	/* Product H.	   ________________  ________________
	 *		  |_____U1 x V1____||____U0 x V0_____|
	 * Put result in upper part of PROD and pass low part of TSPACE
	 * as new TSPACE.
	 */
	MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace);

	/* Product M.	   ________________
	 *		  |_(U1-U0)(V0-V1)_|
	 */
	if( mpihelp_cmp(up + hsize, up, hsize) >= 0 ) {
	    mpihelp_sub_n(prodp, up + hsize, up, hsize);
	    negflg = 0;
	}
	else {
	    mpihelp_sub_n(prodp, up, up + hsize, hsize);
	    negflg = 1;
	}
	if( mpihelp_cmp(vp + hsize, vp, hsize) >= 0 ) {
	    mpihelp_sub_n(prodp + hsize, vp + hsize, vp, hsize);
	    negflg ^= 1;
	}
	else {
	    mpihelp_sub_n(prodp + hsize, vp, vp + hsize, hsize);
	    /* No change of NEGFLG. */
	}

	/* Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.
	 */
	MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size);

	/* Add/copy product H. */
	MPN_COPY(prodp + hsize, prodp + size, hsize);
	cy = mpihelp_add_n( prodp + size, prodp + size,
			    prodp + size + hsize, hsize);

	/* Add product M (if NEGFLG, M is a negative number). */
	if( negflg )
	    cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size);
	else
	    cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);

	/* Product L.	   ________________  ________________
	 *		  |________________||____U0 x V0_____|
	 * Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.
	 */
	MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);

	/* Add/copy Product L (twice). */
	cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
	if( cy )
	    mpihelp_add_1(prodp + hsize + size, prodp + hsize + size,
			  hsize, cy);

	MPN_COPY(prodp, tspace, hsize);
	cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize);
	if( cy )
	    mpihelp_add_1(prodp + size, prodp + size, size, 1);
    }
}
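
/* Scratch space note: mul_n needs 2*size limbs at TSPACE.  Each level
 * keeps its current size-limb intermediate product (first M, then L)
 * in the lower half and hands the upper half (tspace + size, exactly
 * the 2*hsize limbs required) down as scratch for the half-size
 * recursive calls.
 */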

void
mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size )
{
    mpi_size_t i;
    mpi_limb_t cy_limb;
    mpi_limb_t v_limb;

    /* Multiply by the first limb in V separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing. */
    v_limb = up[0];
    if( v_limb <= 1 ) {
	if( v_limb == 1 )
	    MPN_COPY( prodp, up, size );
	else
	    MPN_ZERO(prodp, size);
	cy_limb = 0;
    }
    else
	cy_limb = mpihelp_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy_limb;
    prodp++;

    /* For each iteration in the outer loop, multiply one limb from
     * U with one limb from V, and add it to PROD. */
    for( i=1; i < size; i++) {
	v_limb = up[i];
	if( v_limb <= 1 ) {
	    cy_limb = 0;
	    if( v_limb == 1 )
		cy_limb = mpihelp_add_n(prodp, prodp, up, size);
	}
	else
	    cy_limb = mpihelp_addmul_1(prodp, up, size, v_limb);

	prodp[size] = cy_limb;
	prodp++;
    }
}

void
mpih_sqr_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
{
    if( size & 1 ) {
	/* The size is odd, and the code below doesn't handle that.
	 * Multiply the least significant (size - 1) limbs with a recursive
	 * call, and handle the most significant limb of S1 and S2
	 * separately.
	 * A slightly faster way to do this would be to make the Karatsuba
	 * code below behave as if the size were even, and let it check for
	 * odd size in the end.  I.e., in essence move this code to the end.
	 * Doing so would save us a recursive call, and potentially make the
	 * stack grow a lot less.
	 */
	mpi_size_t esize = size - 1;	/* even size */
	mpi_limb_t cy_limb;

	MPN_SQR_N_RECURSE( prodp, up, esize, tspace );
	cy_limb = mpihelp_addmul_1( prodp + esize, up, esize, up[esize] );
	prodp[esize + esize] = cy_limb;
	cy_limb = mpihelp_addmul_1( prodp + esize, up, size, up[esize] );
	prodp[esize + size] = cy_limb;
    }
    else {
	mpi_size_t hsize = size >> 1;
	mpi_limb_t cy;

	/* Product H.	   ________________  ________________
	 *		  |_____U1 x U1____||____U0 x U0_____|
	 * Put result in upper part of PROD and pass low part of TSPACE
	 * as new TSPACE.
	 */
	MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);

	/* Product M.	   ________________
	 *		  |_(U1-U0)(U0-U1)_|
	 */
	if( mpihelp_cmp( up + hsize, up, hsize) >= 0 )
	    mpihelp_sub_n( prodp, up + hsize, up, hsize);
	else
	    mpihelp_sub_n (prodp, up, up + hsize, hsize);

	/* Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.
	 */
	MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);

	/* Add/copy product H. */
	MPN_COPY(prodp + hsize, prodp + size, hsize);
	cy = mpihelp_add_n(prodp + size, prodp + size,
			   prodp + size + hsize, hsize);

	/* Add product M.  For squaring, M is never positive (see the
	 * note following this function), so it is always subtracted. */
	cy -= mpihelp_sub_n (prodp + hsize, prodp + hsize, tspace, size);

	/* Product L.	   ________________  ________________
	 *		  |________________||____U0 x U0_____|
	 * Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.
	 */
	MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);

	/* Add/copy Product L (twice). */
	cy += mpihelp_add_n (prodp + hsize, prodp + hsize, tspace, size);
	if( cy )
	    mpihelp_add_1(prodp + hsize + size, prodp + hsize + size,
			  hsize, cy);

	MPN_COPY(prodp, tspace, hsize);
	cy = mpihelp_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
	if( cy )
	    mpihelp_add_1 (prodp + size, prodp + size, size, 1);
    }
}
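
/* Note on the sign handling above: for squaring, product M is
 * (U1-U0)*(U0-U1) = -(U1-U0)**2, which can never be positive.  That is
 * why, unlike mul_n, no NEGFLG bookkeeping is needed and M is always
 * subtracted.
 */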

/* This should be made into an inline function in gmp.h. */
void
mpihelp_mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
{
    int secure;

    if( up == vp ) {
	if( size < KARATSUBA_THRESHOLD )
	    mpih_sqr_n_basecase( prodp, up, size );
	else {
	    mpi_ptr_t tspace;
	    secure = m_is_secure( up );
	    tspace = mpi_alloc_limb_space( 2 * size, secure );
	    mpih_sqr_n( prodp, up, size, tspace );
	    mpi_free_limb_space( tspace );
	}
    }
    else {
	if( size < KARATSUBA_THRESHOLD )
	    mul_n_basecase( prodp, up, vp, size );
	else {
	    mpi_ptr_t tspace;
	    secure = m_is_secure( up ) || m_is_secure( vp );
	    tspace = mpi_alloc_limb_space( 2 * size, secure );
	    mul_n (prodp, up, vp, size, tspace);
	    mpi_free_limb_space( tspace );
	}
    }
}
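
/* A minimal usage sketch (hypothetical caller code, not part of this
 * file): the destination must provide 2*size limbs and be distinct
 * from both source operands.
 *
 *	mpi_ptr_t rp = mpi_alloc_limb_space( 2 * n, 0 );
 *	mpihelp_mul_n( rp, ap, bp, n );    -- rp[0..2n-1] now holds a*b
 *	...
 *	mpi_free_limb_space( rp );
 */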

/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
 * and v (pointed to by VP, with VSIZE limbs), and store the result at
 * PRODP.  USIZE + VSIZE limbs are always stored, even though the most
 * significant limb may be zero.  Return the most significant limb of
 * the result.
 *
 * NOTE: The space pointed to by PRODP is overwritten before finished
 * with U and V, so overlap is an error.
 *
 * Argument constraints:
 * 1. USIZE >= VSIZE.
 * 2. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 */
mpi_limb_t
mpihelp_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
	     mpi_ptr_t vp, mpi_size_t vsize)
{
    mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
    mpi_limb_t cy;
    mpi_ptr_t tspace;

    if( vsize < KARATSUBA_THRESHOLD ) {
	mpi_size_t i;
	mpi_limb_t v_limb;

	if( !vsize )
	    return 0;

	/* Multiply by the first limb in V separately, as the result can be
	 * stored (not added) to PROD.  We also avoid a loop for zeroing. */
	v_limb = vp[0];
	if( v_limb <= 1 ) {
	    if( v_limb == 1 )
		MPN_COPY( prodp, up, usize );
	    else
		MPN_ZERO( prodp, usize );
	    cy = 0;
	}
	else
	    cy = mpihelp_mul_1( prodp, up, usize, v_limb );

	prodp[usize] = cy;
	prodp++;

	/* For each iteration in the outer loop, multiply one limb from
	 * U with one limb from V, and add it to PROD. */
	for( i = 1; i < vsize; i++ ) {
	    v_limb = vp[i];
	    if( v_limb <= 1 ) {
		cy = 0;
		if( v_limb == 1 )
		    cy = mpihelp_add_n(prodp, prodp, up, usize);
	    }
	    else
		cy = mpihelp_addmul_1(prodp, up, usize, v_limb);

	    prodp[usize] = cy;
	    prodp++;
	}

	return cy;
    }

    tspace = mpi_alloc_limb_space( 2 * vsize,
				   m_is_secure( up ) || m_is_secure( vp ) );
    MPN_MUL_N_RECURSE( prodp, up, vp, vsize, tspace );

    prodp += vsize;
    up += vsize;
    usize -= vsize;
    if( usize >= vsize ) {
	mpi_ptr_t tp = mpi_alloc_limb_space( 2 * vsize, m_is_secure( up )
						     || m_is_secure( vp ) );
	do {
	    MPN_MUL_N_RECURSE( tp, up, vp, vsize, tspace );
	    cy = mpihelp_add_n( prodp, prodp, tp, vsize );
	    mpihelp_add_1( prodp + vsize, tp + vsize, vsize, cy );
	    prodp += vsize;
	    up += vsize;
	    usize -= vsize;
	} while( usize >= vsize );
	mpi_free_limb_space( tp );
    }

    /* Here usize < vsize: handle the remaining limbs of U, if any, by
     * recursing with the operands swapped. */
    if( usize ) {
	mpihelp_mul( tspace, vp, vsize, up, usize );
	cy = mpihelp_add_n( prodp, prodp, tspace, vsize);
	mpihelp_add_1( prodp + vsize, tspace + vsize, usize, cy );
    }

    mpi_free_limb_space( tspace );
    return *prod_endp;
}
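
/* Worked example of the blocking above: with usize = 7 and vsize = 3,
 * the first MPN_MUL_N_RECURSE fills prodp[0..5] with up[0..2] * V; the
 * do-loop then adds up[3..5] * V at limb offset 3; finally usize = 1 is
 * less than vsize, so the operands are swapped and mpihelp_mul recurses
 * to add up[6] * V at offset 6.  Each partial product is 2*vsize limbs
 * wide and overlaps the next block by vsize limbs, hence the add_n
 * calls and the carry propagation through mpihelp_add_1.
 */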