1/* mpn_mul -- Multiply two natural numbers.
2
3Copyright (C) 1991, 1993, 1994, 1996 Free Software Foundation, Inc.
4
5This file is part of the GNU MP Library.
6
7The GNU MP Library is free software; you can redistribute it and/or modify
8it under the terms of the GNU Lesser General Public License as published by
9the Free Software Foundation; either version 2.1 of the License, or (at your
10option) any later version.
11
12The GNU MP Library is distributed in the hope that it will be useful, but
13WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
15License for more details.
16
17You should have received a copy of the GNU Lesser General Public License
18along with the GNU MP Library; see the file COPYING.LIB. If not, write to
19the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20MA 02111-1307, USA. */
21
22#include <config.h>
23#include "gmp-impl.h"
24
/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
   and v (pointed to by VP, with VSIZE limbs), and store the result at
   PRODP.  USIZE + VSIZE limbs are always stored, but the most
   significant limb may be zero even when the input operands are
   normalized.  Return the most significant limb of the result.
30
31 NOTE: The space pointed to by PRODP is overwritten before finished
32 with U and V, so overlap is an error.
33
34 Argument constraints:
35 1. USIZE >= VSIZE.
36 2. PRODP != UP and PRODP != VP, i.e. the destination
37 must be distinct from the multiplier and the multiplicand. */
38
39/* If KARATSUBA_THRESHOLD is not already defined, define it to a
40 value which is good on most machines. */
41#ifndef KARATSUBA_THRESHOLD
42#define KARATSUBA_THRESHOLD 32
43#endif
44
mp_limb_t
#if __STDC__
mpn_mul (mp_ptr prodp,
	 mp_srcptr up, mp_size_t usize,
	 mp_srcptr vp, mp_size_t vsize)
#else
mpn_mul (prodp, up, usize, vp, vsize)
     mp_ptr prodp;
     mp_srcptr up;
     mp_size_t usize;
     mp_srcptr vp;
     mp_size_t vsize;
#endif
{
  /* Address of the most significant limb of the full product.  The
     Karatsuba path returns through this pointer, since unlike the base
     case no single carry variable holds the top limb there.  */
  mp_ptr prod_endp = prodp + usize + vsize - 1;
  mp_limb_t cy;
  mp_ptr tspace;

  if (vsize < KARATSUBA_THRESHOLD)
    {
      /* Handle simple cases with traditional multiplication.

	 This is the most critical code of the entire function.  All
	 multiplies rely on this, both small and huge.  Small ones arrive
	 here immediately.  Huge ones arrive here as this is the base case
	 for Karatsuba's recursive algorithm below.  */
      mp_size_t i;
      mp_limb_t cy_limb;
      mp_limb_t v_limb;

      /* With no multiplier limbs there is nothing to do; note that in
	 this one case no limbs at all are stored at PRODP.  */
      if (vsize == 0)
	return 0;

      /* Multiply by the first limb in V separately, as the result can be
	 stored (not added) to PROD.  We also avoid a loop for zeroing.  */
      v_limb = vp[0];
      if (v_limb <= 1)
	{
	  /* Limb values 0 and 1 are special-cased: a plain copy or a
	     zero fill is cheaper than the limb-multiply primitive.  */
	  if (v_limb == 1)
	    MPN_COPY (prodp, up, usize);
	  else
	    MPN_ZERO (prodp, usize);
	  cy_limb = 0;
	}
      else
	cy_limb = mpn_mul_1 (prodp, up, usize, v_limb);

      /* Store the carry-out limb, then slide the destination window up
	 one limb for the next partial product.  */
      prodp[usize] = cy_limb;
      prodp++;

      /* For each iteration in the outer loop, multiply one limb from
	 U with one limb from V, and add it to PROD.  */
      for (i = 1; i < vsize; i++)
	{
	  v_limb = vp[i];
	  if (v_limb <= 1)
	    {
	      /* Again special-case 0 (nothing to add) and 1 (a plain
		 add of U into the accumulated product).  */
	      cy_limb = 0;
	      if (v_limb == 1)
		cy_limb = mpn_add_n (prodp, prodp, up, usize);
	    }
	  else
	    cy_limb = mpn_addmul_1 (prodp, up, usize, v_limb);

	  prodp[usize] = cy_limb;
	  prodp++;
	}
      /* cy_limb is the limb stored at the top of the product, i.e. the
	 most significant limb of the result.  */
      return cy_limb;
    }

  /* Karatsuba path: split U into VSIZE-limb chunks and multiply each
     chunk by the whole of V with the NxN recursive multiply,
     accumulating the partial products into PRODP.
     NOTE(review): MPN_MUL_N_RECURSE is defined outside this file
     (mul_n.c / gmp-impl.h); it presumably writes the full 2*VSIZE-limb
     product of two VSIZE-limb operands, using its last argument as
     scratch -- confirm there.  Also note the alloca'd scratch grows
     with VSIZE, so enormous operands risk stack overflow; later GMP
     releases use TMP_ALLOC for this.  */
  tspace = (mp_ptr) alloca (2 * vsize * BYTES_PER_MP_LIMB);
  /* Lowest chunk: its product can be stored directly, nothing to add.  */
  MPN_MUL_N_RECURSE (prodp, up, vp, vsize, tspace);

  prodp += vsize;
  up += vsize;
  usize -= vsize;
  if (usize >= vsize)
    {
      /* Remaining full VSIZE-limb chunks of U.  Each chunk product's
	 low half overlaps the high half of what is already in PRODP and
	 is added in; the high half (plus the resulting carry) lands in
	 the fresh limbs just above.  */
      mp_ptr tp = (mp_ptr) alloca (2 * vsize * BYTES_PER_MP_LIMB);
      do
	{
	  MPN_MUL_N_RECURSE (tp, up, vp, vsize, tspace);
	  cy = mpn_add_n (prodp, prodp, tp, vsize);
	  /* The carry-out of this add is discarded: it is necessarily 0
	     because the full product fits in USIZE + VSIZE limbs.  */
	  mpn_add_1 (prodp + vsize, tp + vsize, vsize, cy);
	  prodp += vsize;
	  up += vsize;
	  usize -= vsize;
	}
      while (usize >= vsize);
    }

  /* True: usize < vsize.  */

  /* Make life simple: Recurse.  */

  if (usize != 0)
    {
      /* Operands are swapped so the recursive call preserves argument
	 constraint 1 (first size >= second size).  The USIZE+VSIZE-limb
	 product fits in tspace (2*VSIZE limbs) since usize < vsize.  */
      mpn_mul (tspace, vp, vsize, up, usize);
      cy = mpn_add_n (prodp, prodp, tspace, vsize);
      /* As above, the discarded carry-out of this add is always 0.  */
      mpn_add_1 (prodp + vsize, tspace + vsize, usize, cy);
    }

  return *prod_endp;
}
149