diff -Nru a/init/Kconfig b/init/Kconfig
--- a/init/Kconfig	Thu Jan 15 09:29:02 2004
+++ b/init/Kconfig	Thu Jan 15 09:29:02 2004
@@ -285,4 +285,13 @@
 	  runs modprobe with the appropriate arguments, thereby
 	  loading the module if it is available.  If unsure, say Y.
 
+config MODULE_SIG
+	bool "Module signature checking"
+	depends on MODULES
+	help
+	  If you say Y here, module signatures will be checked at load
+	  time and modules that fail verification will be rejected.
+	  If unsure, say N.
+
 endmenu
+
diff -Nru a/kernel/module-sig.c b/kernel/module-sig.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/kernel/module-sig.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,99 @@
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+#include <linux/crypto.h>
+#include <asm/scatterlist.h>
+
+#define SHA1_DIGEST_SIZE        20
+
+extern int rsa_check_sig(char *sig, u8 *sha1);
+
+static u8 sha1_result[SHA1_DIGEST_SIZE];
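+/*
+ * Returns 0 when the module has no "module_sig" section (or the SHA-1
+ * transform cannot be allocated) and when the signature verifies;
+ * returns the nonzero rsa_check_sig() result on verification failure,
+ * which load_module() turns into -EPERM.
+ */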
+int module_check_sig(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, const char *secstrings)
+{
+	int i;
+	unsigned int sig_index = 0;
+	struct crypto_tfm *sha1_tfm;
+	char *sig;
+
+	/* Can't use find_sec as it only checks for Alloc bit */
+	for (i = 1; i < hdr->e_shnum; i++)
+		if (strcmp(secstrings+sechdrs[i].sh_name, "module_sig") == 0) {
+			sig_index = i;
+			break;
+		}
+	if (sig_index == 0)
+		return 0;
+	printk(KERN_DEBUG "GREG: sig_index = %d sh_size = %d\n", sig_index, sechdrs[sig_index].sh_size);
+
+	sig = (char *)sechdrs[sig_index].sh_addr;
+	printk(KERN_DEBUG "GREG: ");
+	for (i = 0; i < sechdrs[sig_index].sh_size; ++i)
+		printk("%.2x ", (unsigned char)sig[i]);
+	printk("\n");
+
+	sha1_tfm = crypto_alloc_tfm("sha1", 0);
+	if (sha1_tfm == NULL) {
+		printk(KERN_DEBUG "GREG: failed to load transform for sha1\n");
+		return 0;
+	}
+
+	crypto_digest_init(sha1_tfm);
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		struct scatterlist sg;
+		void *temp;
+		int size;
+		const char *name = secstrings+sechdrs[i].sh_name;
+
+		/* We only care about sections with "text" or "data" in their names */
+		if ((strstr(name, "text") == NULL) &&
+		    (strstr(name, "data") == NULL))
+			continue;
+		/* avoid the ".rel.*" sections too. */
+		if (strstr(name, ".rel.") != NULL)
+			continue;
+
+		temp = (void *)sechdrs[i].sh_addr;
+		size = sechdrs[i].sh_size;
+		printk(KERN_DEBUG "GREG: sha1ing the %s section, size %d\n", name, size);
+		do {
+			memset(&sg, 0x00, sizeof(struct scatterlist));
+			sg.page = virt_to_page(temp);
+			sg.offset = offset_in_page(temp);
+			sg.length = min((unsigned long)size, (PAGE_SIZE - (unsigned long)sg.offset));
+			size -= sg.length;
+			temp += sg.length;
+			printk(KERN_DEBUG "GREG: page = %p, .offset = %d, .length = %d, temp = %p, size = %d\n",
+				sg.page, sg.offset, sg.length, temp, size);
+			crypto_digest_update(sha1_tfm, &sg, 1);
+		} while (size > 0);
+	}
+	crypto_digest_final(sha1_tfm, sha1_result);
+	crypto_free_tfm(sha1_tfm);
+	printk(KERN_DEBUG "GREG: sha1 is: ");
+	for (i = 0; i < sizeof(sha1_result); ++i)
+		printk("%.2x ", sha1_result[i]);
+	printk("\n");
+
+	/* see if this sha1 is really encrypted in the section */
+	return rsa_check_sig(sig, &sha1_result[0]);
+}
+
+
diff -Nru a/kernel/module-sig.h b/kernel/module-sig.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/kernel/module-sig.h	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,14 @@
+#ifndef _MODULE_SIG_H
+#define _MODULE_SIG_H
+
+#ifdef CONFIG_MODULE_SIG
+extern int module_check_sig(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, const char *secstrings);
+#else
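+/* With CONFIG_MODULE_SIG disabled, the stub always reports success. */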
+static inline int module_check_sig(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, const char *secstrings)
+{
+	return 0;
+}
+#endif
+
+#endif
diff -Nru a/kernel/module.c b/kernel/module.c
--- a/kernel/module.c	Thu Jan 15 09:28:58 2004
+++ b/kernel/module.c	Thu Jan 15 09:28:58 2004
@@ -1494,6 +1502,12 @@
 		goto free_hdr;
 	}
 
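+	/* Refuse to load a module whose signature fails to verify. */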
+	if (module_check_sig(hdr, sechdrs, secstrings)) {
+		err = -EPERM;
+		goto free_hdr;
+	}
+
 	/* Now copy in args */
 	arglen = strlen_user(uargs);
 	if (!arglen) {
diff -Nru a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig	Thu Jan 15 09:29:13 2004
+++ b/security/Kconfig	Thu Jan 15 09:29:13 2004
@@ -46,5 +46,7 @@
 
 source security/selinux/Kconfig
 
+source security/rsa/Kconfig
+
 endmenu
 
diff -Nru a/security/Makefile b/security/Makefile
--- a/security/Makefile	Thu Jan 15 09:29:17 2004
+++ b/security/Makefile	Thu Jan 15 09:29:17 2004
@@ -3,6 +3,10 @@
 #
 
 subdir-$(CONFIG_SECURITY_SELINUX)	+= selinux
+subdir-$(CONFIG_SECURITY_RSA)	+= rsa
+ifeq ($(CONFIG_SECURITY_RSA),y)
+obj-$(CONFIG_SECURITY_RSA)	+= rsa/built-in.o
+endif
 
 # if we don't select a security model, use the default capabilities
 ifneq ($(CONFIG_SECURITY),y)
diff -Nru a/security/rsa/Kconfig b/security/rsa/Kconfig
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/Kconfig	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,8 @@
+config SECURITY_RSA
+	tristate "RSA crap"
+	help
+	  bah bah bah...
+
+	  If you are unsure how to answer this question, answer N.
+
diff -Nru a/security/rsa/Makefile b/security/rsa/Makefile
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/Makefile	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,34 @@
+#rsacore-objs := rsa.o mpi.o mpi_generic.o
+
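+# Apparently flattened copies of the GnuPG MPI/RSA sources, renamed gnupg_<subdir>_<file>.c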
+rsacore-objs :=						\
+		gnupg_mpi_generic_mpih-lshift.o		\
+		gnupg_mpi_generic_mpih-mul1.o		\
+		gnupg_mpi_generic_mpih-mul2.o		\
+		gnupg_mpi_generic_mpih-mul3.o		\
+		gnupg_mpi_generic_mpih-rshift.o		\
+		gnupg_mpi_generic_mpih-sub1.o		\
+		gnupg_mpi_generic_mpih-add1.o		\
+		gnupg_mpi_generic_udiv-w-sdiv.o		\
+		gnupg_mpi_mpicoder.o			\
+		gnupg_mpi_mpi-add.o			\
+		gnupg_mpi_mpi-bit.o			\
+		gnupg_mpi_mpi-div.o			\
+		gnupg_mpi_mpi-cmp.o			\
+		gnupg_mpi_mpi-gcd.o			\
+		gnupg_mpi_mpih-cmp.o			\
+		gnupg_mpi_mpih-div.o			\
+		gnupg_mpi_mpih-mul.o			\
+		gnupg_mpi_mpi-inline.o			\
+		gnupg_mpi_mpi-inv.o			\
+		gnupg_mpi_mpi-mpow.o			\
+		gnupg_mpi_mpi-mul.o			\
+		gnupg_mpi_mpi-pow.o			\
+		gnupg_mpi_mpi-scan.o			\
+		gnupg_mpi_mpiutil.o			\
+		gnupg_cipher_rsa-verify.o		\
+		rsa.o					\
+		rsa_key.o
+
+obj-$(CONFIG_SECURITY_RSA) += rsacore.o
+
diff -Nru a/security/rsa/foo.h b/security/rsa/foo.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/foo.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,381 @@
+#ifndef FOO_H
+#define FOO_H
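+/*
+ * Grab-bag of MPI limb types, low-level arithmetic macros and inline
+ * helpers, apparently distilled from GnuPG's mpi-internal.h and
+ * longlong.h for use in the kernel.
+ */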
+
+/* The size of a `unsigned int', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_INT 4
+
+/* The size of a `unsigned long', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_LONG 4
+
+/* The size of a `unsigned long long', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_LONG_LONG 8
+
+/* The size of a `unsigned short', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_SHORT 2
+
+#ifndef UMUL_TIME
+  #define UMUL_TIME 1
+#endif
+#ifndef UDIV_TIME
+  #define UDIV_TIME UMUL_TIME
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+
+#define BYTES_PER_MPI_LIMB  (SIZEOF_UNSIGNED_LONG)
+
+#ifndef BITS_PER_MPI_LIMB
+#if BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_INT
+  typedef unsigned int mpi_limb_t;
+  typedef   signed int mpi_limb_signed_t;
+#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG
+  typedef unsigned long int mpi_limb_t;
+  typedef   signed long int mpi_limb_signed_t;
+#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG_LONG
+  typedef unsigned long long int mpi_limb_t;
+  typedef   signed long long int mpi_limb_signed_t;
+#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_SHORT
+  typedef unsigned short int mpi_limb_t;
+  typedef   signed short int mpi_limb_signed_t;
+#else
+  #error BYTES_PER_MPI_LIMB does not match any C type
+#endif
+#define BITS_PER_MPI_LIMB    (8*BYTES_PER_MPI_LIMB)
+#endif /*BITS_PER_MPI_LIMB*/
+
+
+
+#define W_TYPE_SIZE BITS_PER_MPI_LIMB
+
+//typedef unsigned long mpi_limb_t;
+//typedef   signed long mpi_libb_signed_t;
+typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
+
+typedef mpi_limb_t UWtype;
+typedef unsigned int UHWtype;
+
+typedef int mpi_size_t;	/* must be signed: MPN_COPY_DECR and _gcry_mpih_cmp count below zero */
+
+struct gcry_mpi {
+    int alloced;    /* array size (# of allocated limbs) */
+    int nlimbs;     /* number of valid limbs */
+    int sign;       /* indicates a negative number and is used for opaque
+                     * MPIs to store the length */
+    unsigned flags; /* bit 0: array must be allocated in secure memory space */
+                    /* bit 2: the limb is a pointer to some m_alloced data */
+    mpi_limb_t *d;  /* array with the limbs */
+};
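+/* Limbs are stored least significant first: with 32-bit limbs a
+ * 1024-bit modulus has nlimbs == 32 and d[0] holds the low 32 bits. */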
+
+struct karatsuba_ctx {
+    struct karatsuba_ctx *next;
+    mpi_ptr_t tspace;
+    mpi_size_t tspace_size;
+    mpi_ptr_t tp;
+    mpi_size_t tp_size;
+};
+
+typedef struct gcry_mpi *GcryMPI;
+typedef struct gcry_mpi *GCRY_MPI;
+
+#define mpi_get_nlimbs(a)	((a)->nlimbs)
+#define mpi_is_neg(a)		((a)->sign)
+#define mpi_is_secure(a)	((a) && ((a)->flags&1))
+
+
+
+extern struct gcry_mpi *gcry_mpi_new( unsigned int nbits );
+extern struct gcry_mpi *mpi_alloc_secure( unsigned nlimbs );
+extern void mpi_free(struct gcry_mpi *a );
+
+extern mpi_ptr_t mpi_alloc_limb_space( unsigned nlimbs, int sec );
+
+extern void mpi_sub_ui(struct gcry_mpi *w, struct gcry_mpi *u, unsigned long v);
+extern void mpi_add(struct gcry_mpi *w, struct gcry_mpi *u, struct gcry_mpi *v);
+extern void mpi_fdiv_r(struct gcry_mpi *rem, struct gcry_mpi *dividend, struct gcry_mpi *divisor);
+extern void mpi_powm(struct gcry_mpi *res, struct gcry_mpi *base, struct gcry_mpi *exp, struct gcry_mpi *mod);
+extern void mpi_sub(struct gcry_mpi *w, struct gcry_mpi *u, struct gcry_mpi *v);
+extern void mpi_mul(struct gcry_mpi *w, struct gcry_mpi *u,struct gcry_mpi *v);
+extern void mpi_mulm(struct gcry_mpi *w, struct gcry_mpi *u,struct gcry_mpi *v, struct gcry_mpi *m);
+
+#define assert(x)	/* assertions compiled out in the kernel build */
+
+#define RESIZE_IF_NEEDED(a,b) \
+    do {                           \
+        if( (a)->alloced < (b) )   \
+            mpi_resize((a), (b));  \
+    } while(0)
+
+
+/* Copy N limbs from S to D.  */
+#define MPN_COPY( d, s, n) \
+    do {                                \
+        size_t _i;                  \
+        for( _i = 0; _i < (n); _i++ )   \
+            (d)[_i] = (s)[_i];          \
+    } while(0)
+
+/* Zero N limbs at D */
+#define MPN_ZERO(d, n) \
+    do {                                  \
+        int  _i;                          \
+        for( _i = 0; _i < (n); _i++ )  \
+            (d)[_i] = 0;                    \
+    } while (0)
+
+
+#define MPN_NORMALIZE(d, n)  \
+    do {                       \
+        while( (n) > 0 ) {     \
+            if( (d)[(n)-1] ) \
+                break;         \
+            (n)--;             \
+        }                      \
+    } while(0)
+
+#define MPN_COPY_DECR( d, s, n ) \
+    do {                                \
+        mpi_size_t _i;                  \
+        for( _i = (n)-1; _i >= 0; _i--) \
+           (d)[_i] = (s)[_i];           \
+    } while(0)
+
+
+
+#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
+    do {                                                \
+        if( (size) < KARATSUBA_THRESHOLD )              \
+            mul_n_basecase (prodp, up, vp, size);       \
+        else                                            \
+            mul_n (prodp, up, vp, size, tspace);        \
+    } while (0)
+
+
+#define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
+    do {                                            \
+        if ((size) < KARATSUBA_THRESHOLD)           \
+            _gcry_mpih_sqr_n_basecase (prodp, up, size);         \
+        else                                        \
+            _gcry_mpih_sqr_n (prodp, up, size, tspace);  \
+    } while (0)
+
+
+#ifndef KARATSUBA_THRESHOLD
+    #define KARATSUBA_THRESHOLD 16
+#endif
+
+
+#if !defined (count_leading_zeros)
+extern const unsigned char __clz_tab[];
+#define MPI_INTERNAL_NEED_CLZ_TAB 1
+#define __BITS4 (W_TYPE_SIZE / 4)
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+
+#if !defined (add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  do {                                                                  \
+    UWtype __x;                                                         \
+    __x = (al) + (bl);                                                  \
+    (sh) = (ah) + (bh) + (__x < (al));                                  \
+    (sl) = __x;                                                         \
+  } while (0)
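+/*
+ * Two-limb addition: (sh,sl) = (ah,al) + (bh,bl); the carry out of the
+ * low limb is detected by unsigned wrap-around, e.g. with 32-bit limbs
+ * add_ssaaaa(sh, sl, 0, 0xffffffff, 0, 1) gives sl = 0 and sh = 1.
+ */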
+#endif
+
+
+#if !defined (sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do {                                                                  \
+    UWtype __x;                                                         \
+    __x = (al) - (bl);                                                  \
+    (sh) = (ah) - (bh) - (__x > (al));                                  \
+    (sl) = __x;                                                         \
+  } while (0)
+#endif
+
+
+
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v)                                         \
+  do {                                                                  \
+    UWtype __x0, __x1, __x2, __x3;                                      \
+    UHWtype __ul, __vl, __uh, __vh;                                     \
+    UWtype __u = (u), __v = (v);                                        \
+                                                                        \
+    __ul = __ll_lowpart (__u);                                          \
+    __uh = __ll_highpart (__u);                                         \
+    __vl = __ll_lowpart (__v);                                          \
+    __vh = __ll_highpart (__v);                                         \
+                                                                        \
+    __x0 = (UWtype) __ul * __vl;                                        \
+    __x1 = (UWtype) __ul * __vh;                                        \
+    __x2 = (UWtype) __uh * __vl;                                        \
+    __x3 = (UWtype) __uh * __vh;                                        \
+                                                                        \
+    __x1 += __ll_highpart (__x0);/* this can't give carry */            \
+    __x1 += __x2;               /* but this indeed can */               \
+    if (__x1 < __x2)            /* did we get it? */                    \
+      __x3 += __ll_B;           /* yes, add it in the proper pos. */    \
+                                                                        \
+    (w1) = __x3 + __ll_highpart (__x1);                                 \
+    (w0) = (__ll_lowpart (__x1) << W_TYPE_SIZE/2) + __ll_lowpart (__x0);\
+  } while (0)
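+/*
+ * Full multiply: (w1,w0) = u * v via four half-limb partial products.
+ * E.g. with 32-bit limbs, umul_ppmm(w1, w0, 0xffffffff, 2) computes
+ * 0x1fffffffe, i.e. w1 = 1 and w0 = 0xfffffffe.
+ */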
+#endif
+
+
+#define count_leading_zeros(count, x) \
+  do {                                                                  \
+    UWtype __xr = (x);                                                  \
+    UWtype __a;                                                         \
+                                                                        \
+    if (W_TYPE_SIZE <= 32)                                              \
+      {                                                                 \
+        __a = __xr < ((UWtype) 1 << 2*__BITS4)                          \
+          ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4)              \
+          : (__xr < ((UWtype) 1 << 3*__BITS4) ?  2*__BITS4 : 3*__BITS4);\
+      }                                                                 \
+    else                                                                \
+      {                                                                 \
+        for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8)                  \
+          if (((__xr >> __a) & 0xff) != 0)                              \
+            break;                                                      \
+      }                                                                 \
+                                                                        \
+    (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);             \
+  } while (0)
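+/*
+ * E.g. count_leading_zeros(count, 1) on a 32-bit limb yields count = 31:
+ * __a ends up 0 and __clz_tab[1] == 1 (the table maps a byte value to
+ * one plus the index of its highest set bit).
+ */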
+#endif
+
+
+
+
+extern mpi_limb_t _gcry_mpih_sub_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, size_t s1_size, mpi_limb_t s2_limb );
+extern mpi_limb_t _gcry_mpih_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_ptr_t s2_ptr, size_t size);
+extern mpi_limb_t _gcry_mpih_add_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_ptr_t s2_ptr, mpi_size_t size);
+extern mpi_limb_t _gcry_mpih_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned int cnt);
+extern mpi_limb_t _gcry_mpih_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt);
+extern mpi_limb_t _gcry_mpih_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb);
+extern mpi_limb_t _gcry_mpih_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb);
+extern mpi_limb_t _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb);
+
+static __inline__  mpi_limb_t
+_gcry_mpih_sub( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, size_t s1_size,
+                                mpi_ptr_t s2_ptr, size_t s2_size)
+{
+    mpi_limb_t cy = 0;
+
+    if( s2_size )
+        cy = _gcry_mpih_sub_n(res_ptr, s1_ptr, s2_ptr, s2_size);
+
+    if( s1_size - s2_size )
+        cy = _gcry_mpih_sub_1(res_ptr + s2_size, s1_ptr + s2_size,
+                                      s1_size - s2_size, cy);
+    return cy;
+}
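+/* Callers must guarantee s1_size >= s2_size; the _gcry_mpih_sub_1()
+ * tail call propagates the borrow through the high limbs of S1. */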
+
+
+
+static __inline__  mpi_limb_t
+_gcry_mpih_add_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+               mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+    mpi_limb_t x;
+
+    x = *s1_ptr++;
+    s2_limb += x;
+    *res_ptr++ = s2_limb;
+    if( s2_limb < x ) { /* sum is less than the left operand: handle carry */
+        while( --s1_size ) {
+            x = *s1_ptr++ + 1;  /* add carry */
+            *res_ptr++ = x;     /* and store */
+            if( x )             /* not 0 (no overflow): we can stop */
+                goto leave;
+        }
+        return 1; /* return carry (size of s1 too small) */
+    }
+
+  leave:
+    if( res_ptr != s1_ptr ) { /* not the same variable */
+        mpi_size_t i;          /* copy the rest */
+        for( i=0; i < s1_size-1; i++ )
+            res_ptr[i] = s1_ptr[i];
+    }
+    return 0; /* no carry */
+}
+
+static __inline__ mpi_limb_t
+_gcry_mpih_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+                               mpi_ptr_t s2_ptr, mpi_size_t s2_size)
+{
+    mpi_limb_t cy = 0;
+
+    if( s2_size )
+        cy = _gcry_mpih_add_n( res_ptr, s1_ptr, s2_ptr, s2_size );
+
+    if( s1_size - s2_size )
+        cy = _gcry_mpih_add_1( res_ptr + s2_size, s1_ptr + s2_size,
+                            s1_size - s2_size, cy);
+    return cy;
+}
+
+
+
+
+/****************
+ * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE.
+ * There are no restrictions on the relative sizes of
+ * the two arguments.
+ * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2.
+ */
+static __inline__ int
+_gcry_mpih_cmp( mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size )
+{
+    mpi_size_t i;
+    mpi_limb_t op1_word, op2_word;
+
+    for( i = size - 1; i >= 0 ; i--) {
+        op1_word = op1_ptr[i];
+        op2_word = op2_ptr[i];
+        if( op1_word != op2_word )
+            goto diff;
+    }
+    return 0;
+
+  diff:
+    /* This can *not* be simplified to
+     *   op1_word - op2_word
+     * since that expression might give signed overflow.  */
+    return (op1_word > op2_word) ? 1 : -1;
+}
+
+
+#endif
diff -Nru a/security/rsa/gnupg_cipher_rsa-verify.c b/security/rsa/gnupg_cipher_rsa-verify.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_cipher_rsa-verify.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,117 @@
+/* rsa.c  -  RSA function
+ *	Copyright (C) 1997, 1998, 1999 by Werner Koch (dd9jn)
+ *	Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+/* This code uses an algorithm protected by U.S. Patent #4,405,829
+   which expires on September 20, 2000.  The patent holder placed that
+   patent into the public domain on Sep 6th, 2000.
+*/
+ 
+#include "gnupg_mpi_mpi.h"
+#include "gnupg_cipher_rsa-verify.h"
+
+#define G10ERR_PUBKEY_ALGO   4      /* Unknown pubkey algorithm */
+#define G10ERR_BAD_SIGN      8      /* Bad signature */
+#define PUBKEY_USAGE_SIG     1	    /* key is good for signatures */
+#define PUBKEY_USAGE_ENC     2	    /* key is good for encryption */
+
+
+typedef struct {
+    MPI n;	    /* modulus */
+    MPI e;	    /* exponent */
+} RSA_public_key;
+
+
+typedef struct {
+    MPI n;	    /* public modulus */
+    MPI e;	    /* public exponent */
+    MPI d;	    /* exponent */
+    MPI p;	    /* prime  p. */
+    MPI q;	    /* prime  q. */
+    MPI u;	    /* inverse of p mod q. */
+} RSA_secret_key;
+
+
+static void public(MPI output, MPI input, RSA_public_key *pkey );
+
+
+/****************
+ * Public key operation. Encrypt INPUT with PKEY and put result into OUTPUT.
+ *
+ *	c = m^e mod n
+ *
+ * Where c is OUTPUT, m is INPUT and e,n are elements of PKEY.
+ */
+static void
+public(MPI output, MPI input, RSA_public_key *pkey )
+{
+    if( output == input ) { /* powm doesn't like output and input the same */
+	MPI x = mpi_alloc( mpi_get_nlimbs(input)*2 );
+	mpi_powm( x, input, pkey->e, pkey->n );
+	mpi_set(output, x);
+	mpi_free(x);
+    }
+    else
+	mpi_powm( output, input, pkey->e, pkey->n );
+}
+
+
+/*********************************************
+ **************  interface  ******************
+ *********************************************/
+
+int
+rsa_verify( MPI hash, MPI *data, MPI *pkey)
+{
+  RSA_public_key pk;
+  MPI result;
+  int rc;
+
+  pk.n = pkey[0];
+  pk.e = pkey[1];
+  result = mpi_alloc( (160+(BITS_PER_MPI_LIMB-1))/BITS_PER_MPI_LIMB);
+  public( result, data[0], &pk );
+  rc = mpi_cmp( result, hash )? G10ERR_BAD_SIGN:0;
+  mpi_free(result);
+
+  return rc;
+}
+
+
+int rsa_encrypt(MPI *result, MPI data, MPI *pkey)
+{
+	RSA_public_key pk;
+
+	pk.n = pkey[0];
+	pk.e = pkey[1];
+	result[0] = mpi_alloc(mpi_get_nlimbs(pk.n));
+	public(result[0], data, &pk);
+	return 0;
+}
+
+
+
+
+
diff -Nru a/security/rsa/gnupg_cipher_rsa-verify.h b/security/rsa/gnupg_cipher_rsa-verify.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_cipher_rsa-verify.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,29 @@
+/* rsa.h
+ *	Copyright (C) 1997,1998 by Werner Koch (dd9jn)
+ *	Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#ifndef G10_RSA_H
+#define G10_RSA_H
+
+#include "gnupg_mpi_mpi.h"
+
+int rsa_verify( MPI hash, MPI *data, MPI *pkey);
+
+#endif /*G10_RSA_H*/
diff -Nru a/security/rsa/gnupg_mpi_config.h b/security/rsa/gnupg_mpi_config.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_config.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,540 @@
+/* config.h.  Generated by configure.  */
+/* config.h.in.  Generated from configure.ac by autoheader.  */
+
+/* Defined if the host has big endian byte ordering */
+/* #undef BIG_ENDIAN_HOST */
+
+/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
+   systems. This function is required for `alloca.c' support on those systems.
+   */
+/* #undef CRAY_STACKSEG_END */
+
+/* Define to 1 if using `alloca.c'. */
+/* #undef C_ALLOCA */
+
+/* define to disable keyserver helpers */
+/* #undef DISABLE_KEYSERVER_HELPERS */
+
+/* define to disable exec-path for keyserver helpers */
+/* #undef DISABLE_KEYSERVER_PATH */
+
+/* define to disable photo viewing */
+/* #undef DISABLE_PHOTO_VIEWER */
+
+/* Define if you don't want the default EGD socket name. For details see
+   cipher/rndegd.c */
+#define EGD_SOCKET_NAME ""
+
+/* Define to 1 if translation of program messages to the user's native
+   language is requested. */
+#define ENABLE_NLS 1
+
+/* if set, restrict photo-viewer to this */
+/* #undef FIXED_PHOTO_VIEWER */
+
+/* Define to 1 if you have `alloca', as a function or macro. */
+#define HAVE_ALLOCA 1
+
+/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
+   */
+#define HAVE_ALLOCA_H 1
+
+/* Define to 1 if you have the <argz.h> header file. */
+#define HAVE_ARGZ_H 1
+
+/* Define to 1 if you have the `atexit' function. */
+#define HAVE_ATEXIT 1
+
+/* Define if `gethrtime(2)' does not work correctly i.e. issues a SIGILL. */
+/* #undef HAVE_BROKEN_GETHRTIME */
+
+/* Defined if the mlock() call does not work */
+/* #undef HAVE_BROKEN_MLOCK */
+
+/* Defined if a `byte' is typedef'd */
+/* #undef HAVE_BYTE_TYPEDEF */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef HAVE_CLOCK_GETTIME */
+
+/* Define to 1 if you have the `ctermid' function. */
+#define HAVE_CTERMID 1
+
+/* Define if the GNU dcgettext() function is already present or preinstalled.
+   */
+#define HAVE_DCGETTEXT 1
+
+/* Define to 1 if you have the declaration of `sys_siglist', and to 0 if you
+   don't. */
+#define HAVE_DECL_SYS_SIGLIST 0
+
+/* defined if the system supports a random device */
+#define HAVE_DEV_RANDOM 1
+
+/* Define to 1 if you have the <direct.h> header file. */
+/* #undef HAVE_DIRECT_H */
+
+/* Define to 1 if you have the `dlopen' function. */
+/* #undef HAVE_DLOPEN */
+
+/* Defined when the dlopen function family is available */
+#define HAVE_DL_DLOPEN 1
+
+/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
+/* #undef HAVE_DOPRNT */
+
+/* defined if we run on some of the PCDOS like systems (DOS, Windoze, OS/2)
+   with special properties like no file modes */
+/* #undef HAVE_DOSISH_SYSTEM */
+
+/* defined if we must run on a stupid file system */
+/* #undef HAVE_DRIVE_LETTERS */
+
+/* Define to 1 if you have the `feof_unlocked' function. */
+#define HAVE_FEOF_UNLOCKED 1
+
+/* Define to 1 if you have the `fgets_unlocked' function. */
+#define HAVE_FGETS_UNLOCKED 1
+
+/* Define to 1 if you have the `fork' function. */
+#define HAVE_FORK 1
+
+/* Define to 1 if fseeko (and presumably ftello) exists and is declared. */
+#define HAVE_FSEEKO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getc_unlocked' function. */
+#define HAVE_GETC_UNLOCKED 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getgid' function. */
+#define HAVE_GETGID 1
+
+/* Define if you have the `gethrtime(2)' function. */
+/* #undef HAVE_GETHRTIME */
+
+/* Define to 1 if you have the `getpagesize' function. */
+#define HAVE_GETPAGESIZE 1
+
+/* Define to 1 if you have the `getrusage' function. */
+#define HAVE_GETRUSAGE 1
+
+/* Define if the GNU gettext() function is already present or preinstalled. */
+#define HAVE_GETTEXT 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `getuid' function. */
+#define HAVE_GETUID 1
+
+/* Define if you have the iconv() function. */
+#define HAVE_ICONV 1
+
+/* Define if <inttypes.h> exists and doesn't clash with <sys/types.h>. */
+#define HAVE_INTTYPES_H 1
+
+/* Define if <inttypes.h> exists, doesn't clash with <sys/types.h>, and
+   declares uintmax_t. */
+#define HAVE_INTTYPES_H_WITH_UINTMAX 1
+
+/* Define if you have <langinfo.h> and nl_langinfo(CODESET). */
+#define HAVE_LANGINFO_CODESET 1
+
+/* Define to 1 if you have the <langinfo.h> header file. */
+#define HAVE_LANGINFO_H 1
+
+/* Define if your <locale.h> file defines LC_MESSAGES. */
+#define HAVE_LC_MESSAGES 1
+
+/* Define to 1 if you have the `dl' library (-ldl). */
+#define HAVE_LIBDL 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+/* #undef HAVE_LIBRT */
+
+/* Define to 1 if you have the <limits.h> header file. */
+#define HAVE_LIMITS_H 1
+
+/* Define to 1 if you have the <locale.h> header file. */
+#define HAVE_LOCALE_H 1
+
+/* Define to 1 if you have the <malloc.h> header file. */
+#define HAVE_MALLOC_H 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mempcpy' function. */
+#define HAVE_MEMPCPY 1
+
+/* Define to 1 if you have the `mkdtemp' function. */
+#define HAVE_MKDTEMP 1
+
+/* Defined if the system supports an mlock() call */
+#define HAVE_MLOCK 1
+
+/* Define to 1 if you have the `mmap' function. */
+#define HAVE_MMAP 1
+
+/* Define to 1 if you have the `munmap' function. */
+#define HAVE_MUNMAP 1
+
+/* Define to 1 if you have the `nl_langinfo' function. */
+#define HAVE_NL_LANGINFO 1
+
+/* Define to 1 if you have the <nl_types.h> header file. */
+#define HAVE_NL_TYPES_H 1
+
+/* Define to 1 if you have the `pipe' function. */
+#define HAVE_PIPE 1
+
+/* Define to 1 if you have the `plock' function. */
+/* #undef HAVE_PLOCK */
+
+/* Define to 1 if you have the `putenv' function. */
+#define HAVE_PUTENV 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if you have the `rand' function. */
+#define HAVE_RAND 1
+
+/* Define to 1 if you have the `setenv' function. */
+#define HAVE_SETENV 1
+
+/* Define to 1 if you have the `setlocale' function. */
+#define HAVE_SETLOCALE 1
+
+/* Define to 1 if you have the `setrlimit' function. */
+#define HAVE_SETRLIMIT 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `sigprocmask' function. */
+#define HAVE_SIGPROCMASK 1
+
+/* Define to 1 if you have the `stat' function. */
+#define HAVE_STAT 1
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#define HAVE_STDDEF_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define if <stdint.h> exists, doesn't clash with <sys/types.h>, and declares
+   uintmax_t. */
+#define HAVE_STDINT_H_WITH_UINTMAX 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `stpcpy' function. */
+#define HAVE_STPCPY 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+#define HAVE_STRCASECMP 1
+
+/* Define to 1 if you have the `strdup' function. */
+#define HAVE_STRDUP 1
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the `strftime' function. */
+#define HAVE_STRFTIME 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlwr' function. */
+/* #undef HAVE_STRLWR */
+
+/* Define to 1 if you have the `strncasecmp' function. */
+#define HAVE_STRNCASECMP 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if you have the <sys/capability.h> header file. */
+/* #undef HAVE_SYS_CAPABILITY_H */
+
+/* Define to 1 if you have the <sys/ipc.h> header file. */
+#define HAVE_SYS_IPC_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+/* #undef HAVE_SYS_MMAN_H */
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/shm.h> header file. */
+#define HAVE_SYS_SHM_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the `tcgetattr' function. */
+#define HAVE_TCGETATTR 1
+
+/* Define to 1 if you have the <termio.h> header file. */
+#define HAVE_TERMIO_H 1
+
+/* Define to 1 if you have the `tsearch' function. */
+#define HAVE_TSEARCH 1
+
+/* Defined if a `u16' is typedef'd */
+/* #undef HAVE_U16_TYPEDEF */
+
+/* Defined if a `u32' is typedef'd */
+/* #undef HAVE_U32_TYPEDEF */
+
+/* Defined if a `ulong' is typedef'd */
+#define HAVE_ULONG_TYPEDEF 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define if you have the unsigned long long type. */
+#define HAVE_UNSIGNED_LONG_LONG 1
+
+/* Defined if a `ushort' is typedef'd */
+#define HAVE_USHORT_TYPEDEF 1
+
+/* Define to 1 if you have the `vfork' function. */
+#define HAVE_VFORK 1
+
+/* Define to 1 if you have the <vfork.h> header file. */
+/* #undef HAVE_VFORK_H */
+
+/* Define to 1 if you have the `vprintf' function. */
+#define HAVE_VPRINTF 1
+
+/* Define to 1 if you have the `wait4' function. */
+#define HAVE_WAIT4 1
+
+/* Define to 1 if you have the `waitpid' function. */
+#define HAVE_WAITPID 1
+
+/* Define to 1 if `fork' works. */
+#define HAVE_WORKING_FORK 1
+
+/* Define to 1 if `vfork' works. */
+#define HAVE_WORKING_VFORK 1
+
+/* Define to 1 if you have the `__argz_count' function. */
+#define HAVE___ARGZ_COUNT 1
+
+/* Define to 1 if you have the `__argz_next' function. */
+#define HAVE___ARGZ_NEXT 1
+
+/* Define to 1 if you have the `__argz_stringify' function. */
+#define HAVE___ARGZ_STRINGIFY 1
+
+/* Define as const if the declaration of iconv() needs const. */
+#define ICONV_CONST 
+
+/* Define if integer division by zero raises signal SIGFPE. */
+#define INTDIV0_RAISES_SIGFPE 1
+
+/* Defined if a SysV shared memory supports the LOCK flag */
+#define IPC_HAVE_SHM_LOCK 1
+
+/* Defined if we can do a deferred shm release */
+#define IPC_RMID_DEFERRED_RELEASE 1
+
+/* Defined if this is not a regular release */
+/* #undef IS_DEVELOPMENT_VERSION */
+
+/* Defined if the host has little endian byte ordering */
+#define LITTLE_ENDIAN_HOST 1
+
+/* Defined if mkdir() does not take permission flags */
+/* #undef MKDIR_TAKES_ONE_ARG */
+
+/* Define to use the (obsolete) malloc guarding feature */
+/* #undef M_GUARD */
+
+/* defined to the name of the strong random device */
+#define NAME_OF_DEV_RANDOM "/dev/random"
+
+/* defined to the name of the weaker random device */
+#define NAME_OF_DEV_URANDOM "/dev/urandom"
+
+/* Define if the LDAP library requires including lber.h before ldap.h */
+/* #undef NEED_LBER_H */
+
+/* Define to disable all external program execution */
+/* #undef NO_EXEC */
+
+/* Name of this package */
+#define PACKAGE "gnupg"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "bug-gnupg@gnu.org"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "gnupg"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "gnupg 1.2.2"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "gnupg"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "1.2.2"
+
+/* A human readable text with the name of the OS */
+#define PRINTABLE_OS_NAME "GNU/Linux"
+
+/* Define if <inttypes.h> exists and defines unusable PRI* macros. */
+/* #undef PRI_MACROS_BROKEN */
+
+/* Define as the return type of signal handlers (`int' or `void'). */
+#define RETSIGTYPE void
+
+/* The size of a `unsigned int', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_INT 4
+
+/* The size of a `unsigned long', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_LONG 4
+
+/* The size of a `unsigned long long', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_LONG_LONG 8
+
+/* The size of a `unsigned short', as computed by sizeof. */
+#define SIZEOF_UNSIGNED_SHORT 2
+
+/* If using the C implementation of alloca, define if you know the
+   direction of stack growth for your system; otherwise it will be
+   automatically deduced at run-time.
+        STACK_DIRECTION > 0 => grows toward higher addresses
+        STACK_DIRECTION < 0 => grows toward lower addresses
+        STACK_DIRECTION = 0 => direction of growth unknown */
+/* #undef STACK_DIRECTION */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Allow to select random modules at runtime. */
+/* #undef USE_ALL_RANDOM_MODULES */
+
+/* define if capabilities should be used */
+/* #undef USE_CAPABILITIES */
+
+/* define to enable the use of extensions */
+#define USE_DYNAMIC_LINKING 1
+
+/* define to use the experimental external HKP keyserver interface */
+/* #undef USE_EXTERNAL_HKP */
+
+/* Define to use the old fake OID for TIGER/192 digest support */
+/* #undef USE_OLD_TIGER */
+
+/* set this to limit filenames to the 8.3 format */
+/* #undef USE_ONLY_8DOT3 */
+
+/* Defined if the EGD based RNG should be used. */
+/* #undef USE_RNDEGD */
+
+/* Defined if the /dev/random based RNG should be used. */
+#define USE_RNDLINUX 1
+
+/* Defined if the default Unix RNG should be used. */
+/* #undef USE_RNDUNIX */
+
+/* Defined if the Windows specific RNG should be used. */
+/* #undef USE_RNDW32 */
+
+/* Define to include read-only SHA-384 and SHA-512 digest support */
+/* #undef USE_SHA512 */
+
+/* define if the shared memory interface should be made available */
+#define USE_SHM_COPROCESSING 1
+
+/* because the Unix gettext has too much overhead on MingW32 systems and these
+   systems lack Posix functions, we use a simplified version of gettext */
+/* #undef USE_SIMPLE_GETTEXT */
+
+/* Define to include experimental TIGER/192 digest support */
+/* #undef USE_TIGER */
+
+/* Version of this package */
+#define VERSION "1.2.2"
+
+/* Defined if compiled symbols have a leading underscore */
+/* #undef WITH_SYMBOL_UNDERSCORE */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+#define _FILE_OFFSET_BITS 64
+
+/* Some tests rely on this (stpcpy) and it should be used for new programs
+   anyway */
+#define _GNU_SOURCE 1
+
+/* Define to 1 to make fseeko visible on some hosts (e.g. glibc 2.2). */
+/* #undef _LARGEFILE_SOURCE */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define as `__inline' if that's what the C compiler calls it, or to nothing
+   if it is not supported. */
+/* #undef inline */
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef mode_t */
+
+/* Define to `long' if <sys/types.h> does not define. */
+/* #undef off_t */
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned long or unsigned long long if <inttypes.h> and
+   <stdint.h> don't define. */
+/* #undef uintmax_t */
+
+/* Define as `fork' if `vfork' does not work. */
+/* #undef vfork */
+
+/* Define to empty if the keyword `volatile' does not work. Warning: valid
+   code using `volatile' can become incorrect without. Disable with care. */
+/* #undef volatile */
+
+
+#if !(defined(HAVE_FORK) && defined(HAVE_PIPE) && defined(HAVE_WAITPID))
+#define EXEC_TEMPFILE_ONLY
+#endif
+
+#include "gnupg_mpi_g10defs.h"
+
diff -Nru a/security/rsa/gnupg_mpi_g10defs.h b/security/rsa/gnupg_mpi_g10defs.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_g10defs.h	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,41 @@
+/* Generated automatically by configure */
+#ifdef HAVE_DRIVE_LETTERS
+  #define G10_LOCALEDIR     "c:\\lib\\gnupg\\locale"
+  #define GNUPG_LIBDIR      "c:\\lib\\gnupg"
+  #define GNUPG_LIBEXECDIR  "c:\\lib\\gnupg"
+  #define GNUPG_DATADIR     "c:\\lib\\gnupg"
+  #define GNUPG_HOMEDIR     "c:\\gnupg"
+#else
+  #define G10_LOCALEDIR     "/usr/local/share/locale"
+  #define GNUPG_LIBDIR      "/usr/local/lib/gnupg"
+  #define GNUPG_DATADIR     "/usr/local/share/gnupg"
+  #ifdef __VMS
+    #define GNUPG_HOMEDIR "/SYS$LOGIN/gnupg"
+  #else
+    #define GNUPG_HOMEDIR "~/.gnupg"
+  #endif
+#endif
+/* those are here to be redefined by handcrafted g10defs.h.
+   Please note that the string version must not contain more
+   than one character because the using code assumes strlen()==1 */
+#ifdef HAVE_DOSISH_SYSTEM
+#define DIRSEP_C '\\'
+#define EXTSEP_C '.'
+#define DIRSEP_S "\\"
+#define EXTSEP_S "."
+#else
+#define DIRSEP_C '/'
+#define EXTSEP_C '.'
+#define DIRSEP_S "/"
+#define EXTSEP_S "."
+#endif
+/* This file defines some basic constants for the MPI machinery.  We
+ * need to define the types on a per-CPU basis, so it is done with
+ * this file here.  */
+#define BYTES_PER_MPI_LIMB  (SIZEOF_UNSIGNED_LONG)
+
+
+
+
+
+
diff -Nru a/security/rsa/gnupg_mpi_g10m.c b/security/rsa/gnupg_mpi_g10m.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_g10m.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,91 @@
+/* g10m.c  -  Wrapper for MPI
+ * Copyright (C) 1998 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "mpi.h"
+
+/* FIXME: The modules should use functions from libgcrypt */
+
+const char *g10m_revision_string(int dummy) { return "$Revision: 1.1 $"; }
+
+MPI
+g10m_new( unsigned nbits )
+{
+    return mpi_alloc( (nbits+BITS_PER_MPI_LIMB-1) / BITS_PER_MPI_LIMB );
+}
+
+MPI
+g10m_new_secure( unsigned nbits )
+{
+    return mpi_alloc_secure( (nbits+BITS_PER_MPI_LIMB-1) / BITS_PER_MPI_LIMB );
+}
+
+void
+g10m_release( MPI a )
+{
+    mpi_free(a);
+}
+
+void
+g10m_resize( MPI a, unsigned nbits )
+{
+    mpi_resize( a, (nbits+BITS_PER_MPI_LIMB-1) / BITS_PER_MPI_LIMB );
+}
+
+MPI  g10m_copy( MPI a ) 	   { return mpi_copy( a );   }
+void g10m_swap( MPI a, MPI b)	   { mpi_swap( a, b );	     }
+void g10m_set( MPI w, MPI u)	   { mpi_set( w, u );	     }
+void g10m_set_ui( MPI w, ulong u ) { mpi_set_ui( w, u ); }
+
+int  g10m_cmp( MPI u, MPI v )	    { return mpi_cmp( u, v ); }
+int  g10m_cmp_ui( MPI u, ulong v )  { return mpi_cmp_ui( u, v ); }
+
+void g10m_add(MPI w, MPI u, MPI v)	  { mpi_add( w, u, v ); }
+void g10m_add_ui(MPI w, MPI u, ulong v )  { mpi_add_ui( w, u, v ); }
+void g10m_sub( MPI w, MPI u, MPI v)	  { mpi_sub( w, u, v ); }
+void g10m_sub_ui(MPI w, MPI u, ulong v )  { mpi_sub_ui( w, u, v ); }
+
+void g10m_mul( MPI w, MPI u, MPI v)	     { mpi_mul( w, u, v ); }
+void g10m_mulm( MPI w, MPI u, MPI v, MPI m)  { mpi_mulm( w, u, v, m ); }
+void g10m_mul_2exp( MPI w, MPI u, ulong cnt) { mpi_mul_2exp( w, u, cnt ); }
+void g10m_mul_ui(MPI w, MPI u, ulong v )     { mpi_mul_ui( w, u, v ); }
+
+void g10m_fdiv_q( MPI q, MPI d, MPI r )      { mpi_fdiv_q( q, d, r ); }
+
+void g10m_powm( MPI r, MPI b, MPI e, MPI m)  { mpi_powm( r, b, e, m );	}
+
+int  g10m_gcd( MPI g, MPI a, MPI b )	{ return mpi_gcd( g, a, b ); }
+int  g10m_invm( MPI x, MPI u, MPI v )	{ mpi_invm( x, u, v ); return 0; }
+
+unsigned g10m_get_nbits( MPI a )   { return mpi_get_nbits( a ); }
+
+unsigned
+g10m_get_size( MPI a )
+{
+    return mpi_get_nlimbs( a ) * BITS_PER_MPI_LIMB;
+}
+
+
+void
+g10m_set_buffer( MPI a, const char *buffer, unsigned nbytes, int sign )
+{
+    mpi_set_buffer( a, buffer, nbytes, sign );
+}
+
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpi-asm-defs.h b/security/rsa/gnupg_mpi_generic_mpi-asm-defs.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpi-asm-defs.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,10 @@
+/* This file defines some basic constants for the MPI machinery.  We
+ * need to define the types on a per-CPU basis, so it is done with
+ * this file here.  */
+#define BYTES_PER_MPI_LIMB  (SIZEOF_UNSIGNED_LONG)
+
+
+
+
+
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpih-add1.c b/security/rsa/gnupg_mpi_generic_mpih-add1.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpih-add1.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,62 @@
+/* mpihelp-add_1.c  -  MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 
+ *               2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+mpi_limb_t
+mpihelp_add_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+	       mpi_ptr_t s2_ptr, mpi_size_t size)
+{
+    mpi_limb_t x, y, cy;
+    mpi_size_t j;
+
+    /* The loop counter and index J goes from -SIZE to -1.  This way
+       the loop becomes faster.  */
+    j = -size;
+
+    /* Offset the base pointers to compensate for the negative indices. */
+    s1_ptr -= j;
+    s2_ptr -= j;
+    res_ptr -= j;
+
+    cy = 0;
+    do {
+	y = s2_ptr[j];
+	x = s1_ptr[j];
+	y += cy;		  /* add previous carry to one addend */
+	cy = y < cy;		  /* get out carry from that addition */
+	y += x; 		  /* add other addend */
+	cy += y < x;		  /* get out carry from that add, combine */
+	res_ptr[j] = y;
+    } while( ++j );
+
+    return cy;
+}
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpih-lshift.c b/security/rsa/gnupg_mpi_generic_mpih-lshift.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpih-lshift.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,66 @@
+/* mpihelp-lshift.c  -	MPI helper functions
+ * Copyright (C) 1994, 1996, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+
+/* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left
+ * and store the USIZE least significant digits of the result at WP.
+ * Return the bits shifted out from the most significant digit.
+ *
+ * Argument constraints:
+ * 1. 0 < CNT < BITS_PER_MP_LIMB
+ * 2. If the result is to be written over the input, WP must be >= UP.
+ */
+
+mpi_limb_t
+mpihelp_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
+					    unsigned int cnt)
+{
+    mpi_limb_t high_limb, low_limb;
+    unsigned sh_1, sh_2;
+    mpi_size_t i;
+    mpi_limb_t retval;
+
+    sh_1 = cnt;
+    wp += 1;
+    sh_2 = BITS_PER_MPI_LIMB - sh_1;
+    i = usize - 1;
+    low_limb = up[i];
+    retval = low_limb >> sh_2;
+    high_limb = low_limb;
+    while( --i >= 0 ) {
+	low_limb = up[i];
+	wp[i] = (high_limb << sh_1) | (low_limb >> sh_2);
+	high_limb = low_limb;
+    }
+    wp[i] = high_limb << sh_1;
+
+    return retval;
+}
+
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpih-mul1.c b/security/rsa/gnupg_mpi_generic_mpih-mul1.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpih-mul1.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,58 @@
+/* mpihelp-mul_1.c  -  MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+mpi_limb_t
+mpihelp_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+						    mpi_limb_t s2_limb)
+{
+    mpi_limb_t cy_limb;
+    mpi_size_t j;
+    mpi_limb_t prod_high, prod_low;
+
+    /* The loop counter and index J goes from -S1_SIZE to -1.  This way
+     * the loop becomes faster.  */
+    j = -s1_size;
+
+    /* Offset the base pointers to compensate for the negative indices.  */
+    s1_ptr -= j;
+    res_ptr -= j;
+
+    cy_limb = 0;
+    do {
+	umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb );
+	prod_low += cy_limb;
+	cy_limb = (prod_low < cy_limb?1:0) + prod_high;
+	res_ptr[j] = prod_low;
+    } while( ++j );
+
+    return cy_limb;
+}
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpih-mul2.c b/security/rsa/gnupg_mpi_generic_mpih-mul2.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpih-mul2.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,63 @@
+/* mpihelp-mul_2.c  -  MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+
+mpi_limb_t
+mpihelp_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+		  mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+    mpi_limb_t cy_limb;
+    mpi_size_t j;
+    mpi_limb_t prod_high, prod_low;
+    mpi_limb_t x;
+
+    /* The loop counter and index J goes from -SIZE to -1.  This way
+     * the loop becomes faster.  */
+    j = -s1_size;
+    res_ptr -= j;
+    s1_ptr -= j;
+
+    cy_limb = 0;
+    do {
+	umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb );
+
+	prod_low += cy_limb;
+	cy_limb = (prod_low < cy_limb?1:0) + prod_high;
+
+	x = res_ptr[j];
+	prod_low = x + prod_low;
+	cy_limb += prod_low < x?1:0;
+	res_ptr[j] = prod_low;
+    } while ( ++j );
+    return cy_limb;
+}
+
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpih-mul3.c b/security/rsa/gnupg_mpi_generic_mpih-mul3.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpih-mul3.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,64 @@
+/* mpihelp-mul_3.c  -  MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+
+mpi_limb_t
+mpihelp_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+		  mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+    mpi_limb_t cy_limb;
+    mpi_size_t j;
+    mpi_limb_t prod_high, prod_low;
+    mpi_limb_t x;
+
+    /* The loop counter and index J go from -SIZE to -1.  This way
+     * the loop becomes faster.  */
+    j = -s1_size;
+    res_ptr -= j;
+    s1_ptr -= j;
+
+    cy_limb = 0;
+    do {
+	umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb);
+
+	prod_low += cy_limb;
+	cy_limb = (prod_low < cy_limb?1:0) + prod_high;
+
+	x = res_ptr[j];
+	prod_low = x - prod_low;
+	cy_limb += prod_low > x?1:0;
+	res_ptr[j] = prod_low;
+    } while( ++j );
+
+    return cy_limb;
+}
+
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpih-rshift.c b/security/rsa/gnupg_mpi_generic_mpih-rshift.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpih-rshift.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,65 @@
+/* mpih-rshift.c  -  MPI helper functions
+ * Copyright (C) 1994, 1996, 1998, 1999,
+ *               2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GNUPG
+ *
+ * GNUPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GNUPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+
+
+/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right
+ * and store the USIZE least significant limbs of the result at WP.
+ * The bits shifted out to the right are returned.
+ *
+ * Argument constraints:
+ * 1. 0 < CNT < BITS_PER_MP_LIMB
+ * 2. If the result is to be written over the input, WP must be <= UP.
+ */
+
+mpi_limb_t
+mpihelp_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt)
+{
+    mpi_limb_t high_limb, low_limb;
+    unsigned sh_1, sh_2;
+    mpi_size_t i;
+    mpi_limb_t retval;
+
+    sh_1 = cnt;
+    wp -= 1;
+    sh_2 = BITS_PER_MPI_LIMB - sh_1;
+    high_limb = up[0];
+    retval = high_limb << sh_2;
+    low_limb = high_limb;
+    for( i=1; i < usize; i++) {
+	high_limb = up[i];
+	wp[i] = (low_limb >> sh_1) | (high_limb << sh_2);
+	low_limb = high_limb;
+    }
+    wp[i] = low_limb >> sh_1;
+
+    return retval;
+}
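+
+/* Worked example (illustrative only, assuming BITS_PER_MPI_LIMB == 32):
+ * shifting the two-limb value { 0x89abcdef, 0x01234567 } -- least
+ * significant limb first, i.e. 0x0123456789abcdef -- right by 8 bits:
+ *
+ *	mpi_limb_t u[2] = { 0x89abcdef, 0x01234567 };
+ *	mpi_limb_t w[2];
+ *	mpi_limb_t out = mpihelp_rshift(w, u, 2, 8);
+ *
+ * leaves w[0] == 0x6789abcd, w[1] == 0x00012345 and returns
+ * out == 0xef000000, the shifted-out bits left-justified in a limb.
+ */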
+
diff -Nru a/security/rsa/gnupg_mpi_generic_mpih-sub1.c b/security/rsa/gnupg_mpi_generic_mpih-sub1.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_mpih-sub1.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,62 @@
+/* mpih-sub1.c  -  MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
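+/* res_ptr[0..size-1] = s1_ptr[0..size-1] - s2_ptr[0..size-1], with
+ * limbs stored least significant first; returns the final borrow
+ * (0 or 1).
+ */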
+mpi_limb_t
+mpihelp_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+				  mpi_ptr_t s2_ptr, mpi_size_t size)
+{
+    mpi_limb_t x, y, cy;
+    mpi_size_t j;
+
+    /* The loop counter and index J go from -SIZE to -1.  This way
+       the loop becomes faster.  */
+    j = -size;
+
+    /* Offset the base pointers to compensate for the negative indices.  */
+    s1_ptr -= j;
+    s2_ptr -= j;
+    res_ptr -= j;
+
+    cy = 0;
+    do {
+	y = s2_ptr[j];
+	x = s1_ptr[j];
+	y += cy;		  /* add previous carry to subtrahend */
+	cy = y < cy;		  /* get out carry from that addition */
+	y = x - y;		  /* main subtract */
+	cy += y > x;		  /* get out carry from the subtract, combine */
+	res_ptr[j] = y;
+    } while( ++j );
+
+    return cy;
+}
+
+
diff -Nru a/security/rsa/gnupg_mpi_generic_udiv-w-sdiv.c b/security/rsa/gnupg_mpi_generic_udiv-w-sdiv.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_generic_udiv-w-sdiv.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,130 @@
+/* mpihelp_udiv_w_sdiv -- implement udiv_qrnnd on machines with only signed
+ *			  division.
+ * Copyright (C) 1992, 1994, 1996, 1998 Free Software Foundation, Inc.
+ * Contributed by Peter L. Montgomery.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+
+#if 0  /* not yet ported to MPI */
+
+mpi_limb_t
+mpihelp_udiv_w_sdiv( mpi_limb_t *rp,
+		     mpi_limb_t a1,
+		     mpi_limb_t a0,
+		     mpi_limb_t d   )
+{
+  mp_limb_t q, r;
+  mp_limb_t c0, c1, b1;
+
+  if ((mpi_limb_signed_t) d >= 0)
+    {
+      if (a1 < d - a1 - (a0 >> (BITS_PER_MP_LIMB - 1)))
+	{
+	  /* dividend, divisor, and quotient are nonnegative */
+	  sdiv_qrnnd (q, r, a1, a0, d);
+	}
+      else
+	{
+	  /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
+	  sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (BITS_PER_MP_LIMB - 1));
+	  /* Divide (c1*2^32 + c0) by d */
+	  sdiv_qrnnd (q, r, c1, c0, d);
+	  /* Add 2^31 to quotient */
+	  q += (mp_limb_t) 1 << (BITS_PER_MP_LIMB - 1);
+	}
+    }
+  else
+    {
+      b1 = d >> 1;			/* d/2, between 2^30 and 2^31 - 1 */
+      c1 = a1 >> 1;			/* A/2 */
+      c0 = (a1 << (BITS_PER_MP_LIMB - 1)) + (a0 >> 1);
+
+      if (a1 < b1)			/* A < 2^32*b1, so A/2 < 2^31*b1 */
+	{
+	  sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+	  r = 2*r + (a0 & 1);		/* Remainder from A/(2*b1) */
+	  if ((d & 1) != 0)
+	    {
+	      if (r >= q)
+		r = r - q;
+	      else if (q - r <= d)
+		{
+		  r = r - q + d;
+		  q--;
+		}
+	      else
+		{
+		  r = r - q + 2*d;
+		  q -= 2;
+		}
+	    }
+	}
+      else if (c1 < b1) 		/* So 2^31 <= (A/2)/b1 < 2^32 */
+	{
+	  c1 = (b1 - 1) - c1;
+	  c0 = ~c0;			/* logical NOT */
+
+	  sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+	  q = ~q;			/* (A/2)/b1 */
+	  r = (b1 - 1) - r;
+
+	  r = 2*r + (a0 & 1);		/* A/(2*b1) */
+
+	  if ((d & 1) != 0)
+	    {
+	      if (r >= q)
+		r = r - q;
+	      else if (q - r <= d)
+		{
+		  r = r - q + d;
+		  q--;
+		}
+	      else
+		{
+		  r = r - q + 2*d;
+		  q -= 2;
+		}
+	    }
+	}
+      else				/* Implies c1 = b1 */
+	{				/* Hence a1 = d - 1 = 2*b1 - 1 */
+	  if (a0 >= -d)
+	    {
+	      q = -1;
+	      r = a0 + d;
+	    }
+	  else
+	    {
+	      q = -2;
+	      r = a0 + 2*d;
+	    }
+	}
+    }
+
+  *rp = r;
+  return q;
+}
+
+#endif
+
diff -Nru a/security/rsa/gnupg_mpi_longlong.h b/security/rsa/gnupg_mpi_longlong.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_longlong.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,1503 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+   Note: I added some stuff for use with gnupg
+
+Copyright (C) 1991, 1992, 1993, 1994, 1996, 1998,
+              2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with this file; see the file COPYING.LIB.  If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+/* You have to define the following before including this file:
+
+   UWtype -- An unsigned type, default type for operations (typically a "word")
+   UHWtype -- An unsigned type, at least half the size of UWtype.
+   UDWtype -- An unsigned type, at least twice as large as UWtype
+   W_TYPE_SIZE -- size in bits of UWtype
+
+   SItype, USItype -- Signed and unsigned 32 bit types.
+   DItype, UDItype -- Signed and unsigned 64 bit types.
+
+   On a 32 bit machine UWtype should typically be USItype;
+   on a 64 bit machine, UWtype should typically be UDItype.
+*/
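+
+/* For illustration only (this is what an includer might provide, not
+   definitions made by this file): a typical 32-bit configuration
+   could look like
+
+	typedef unsigned int		USItype;	(32 bits)
+	typedef unsigned long long	UDItype;	(64 bits)
+	#define UWtype		USItype
+	#define UHWtype		unsigned short
+	#define UDWtype		UDItype
+	#define W_TYPE_SIZE	32
+*/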
+
+#define __BITS4 (W_TYPE_SIZE / 4)
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* This is used to make sure no undesirable sharing between different libraries
+   that use this file takes place.  */
+#ifndef __MPN
+#define __MPN(x) __##x
+#endif
+
+/* Define auxiliary asm macros.
+
+   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+   UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
+   word product in HIGH_PROD and LOW_PROD.
+
+   2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
+   UDWtype product.  This is just a variant of umul_ppmm.
+
+   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+   denominator) divides a UDWtype, composed of the UWtype integers
+   HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+   in QUOTIENT and the remainder in REMAINDER.	HIGH_NUMERATOR must be less
+   than DENOMINATOR for correct operation.  If, in addition, the macro
+   requires the most significant bit of DENOMINATOR to be 1, the
+   pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+   denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
+   is rounded towards 0.
+
+   5) count_leading_zeros(count, x) counts the number of zero-bits from the
+   msb to the first non-zero bit in the UWtype X.  This is the number of
+   steps X needs to be shifted left to set the msb.  Undefined for X == 0,
+   unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
+
+   6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
+   from the least significant end.
+
+   7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+   high_addend_2, low_addend_2) adds two two-word UWtype integers, composed of
+   HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+   respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
+   (i.e. carry out) is not stored anywhere, and is lost.
+
+   8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+   high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+   composed of HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
+   LOW_SUBTRAHEND respectively.  The result is placed in HIGH_DIFFERENCE
+   and LOW_DIFFERENCE.	Overflow (i.e. carry out) is not stored anywhere,
+   and is lost.
+
+   If any of these macros are left undefined for a particular CPU,
+   C macros are used.  */
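+
+/* As an illustration of such a C fallback (a sketch of the idea, not
+   the exact macro), umul_ppmm can be built from four half-word
+   partial products:
+
+	UWtype __x0 = __ll_lowpart (u)  * __ll_lowpart (v);
+	UWtype __x1 = __ll_lowpart (u)  * __ll_highpart (v);
+	UWtype __x2 = __ll_highpart (u) * __ll_lowpart (v);
+	UWtype __x3 = __ll_highpart (u) * __ll_highpart (v);
+	__x1 += __ll_highpart (__x0);	   (cannot overflow)
+	__x1 += __x2;			   (may wrap around ...)
+	if (__x1 < __x2)		   (... then carry into the high word)
+	  __x3 += __ll_B;
+	(w1) = __x3 + __ll_highpart (__x1);
+	(w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);  */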
+
+/* The CPUs come in alphabetical order below.
+
+   Please add support for more CPUs here, or improve the current support
+   for the CPUs below!	*/
+
+#if defined (__GNUC__) && !defined (NO_ASM)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+   understood by gcc1.	Use cpp to avoid major code duplication.  */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+
+/***************************************
+ **************  A29K  *****************
+ ***************************************/
+#if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add %1,%4,%5\n"   \
+           "addc %0,%2,%3"                                              \
+	   : "=r" ((USItype)(sh)),                                      \
+	    "=&r" ((USItype)(sl))                                       \
+	   : "%r" ((USItype)(ah)),                                      \
+	     "rI" ((USItype)(bh)),                                      \
+	     "%r" ((USItype)(al)),                                      \
+	     "rI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub %1,%4,%5\n"                                             \
+	   "subc %0,%2,%3"                                              \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "r" ((USItype)(ah)),                                       \
+	     "rI" ((USItype)(bh)),                                      \
+	     "r" ((USItype)(al)),                                       \
+	     "rI" ((USItype)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("multiplu %0,%1,%2"                                        \
+	     : "=r" ((USItype)(xl))                                     \
+	     : "r" (__m0),                                              \
+	       "r" (__m1));                                             \
+    __asm__ ("multmu %0,%1,%2"                                          \
+	     : "=r" ((USItype)(xh))                                     \
+	     : "r" (__m0),                                              \
+	       "r" (__m1));                                             \
+  } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("dividu %0,%3,%4"                                            \
+	   : "=r" ((USItype)(q)),                                       \
+	     "=q" ((USItype)(r))                                        \
+	   : "1" ((USItype)(n1)),                                       \
+	     "r" ((USItype)(n0)),                                       \
+	     "r" ((USItype)(d)))
+
+#define count_leading_zeros(count, x) \
+    __asm__ ("clz %0,%1"                                                \
+	     : "=r" ((USItype)(count))                                  \
+	     : "r" ((USItype)(x)))
+#define COUNT_LEADING_ZEROS_0 32
+#endif /* __a29k__ */
+
+
+/***************************************
+ **************  ALPHA  ****************
+ ***************************************/
+#if defined (__alpha) && W_TYPE_SIZE == 64
+#define umul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    UDItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("umulh %r1,%2,%0"                                          \
+	     : "=r" ((UDItype) ph)                                      \
+	     : "%rJ" (__m0),                                            \
+	       "rI" (__m1));                                            \
+    (pl) = __m0 * __m1; 						\
+  } while (0)
+#define UMUL_TIME 46
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do { UDItype __r;							\
+    (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); 			\
+    (r) = __r;								\
+  } while (0)
+extern UDItype __udiv_qrnnd ();
+#define UDIV_TIME 220
+#endif /* LONGLONG_STANDALONE */
+#endif /* __alpha */
+
+/***************************************
+ **************  ARM  ******************
+ ***************************************/
+#if defined (__arm__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("adds %1, %4, %5\n"                                          \
+	   "adc  %0, %2, %3"                                            \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%r" ((USItype)(ah)),                                      \
+	     "rI" ((USItype)(bh)),                                      \
+	     "%r" ((USItype)(al)),                                      \
+	     "rI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subs %1, %4, %5\n"                                          \
+	   "sbc  %0, %2, %3"                                            \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "r" ((USItype)(ah)),                                       \
+	     "rI" ((USItype)(bh)),                                      \
+	     "r" ((USItype)(al)),                                       \
+	     "rI" ((USItype)(bl)))
+#if defined __ARM_ARCH_2__ || defined __ARM_ARCH_3__
+#define umul_ppmm(xh, xl, a, b) \
+  __asm__ ("%@ Inlined umul_ppmm\n"                                     \
+	"mov	%|r0, %2, lsr #16		@ AAAA\n"               \
+	"mov	%|r2, %3, lsr #16		@ BBBB\n"               \
+	"bic	%|r1, %2, %|r0, lsl #16		@ aaaa\n"               \
+	"bic	%0, %3, %|r2, lsl #16		@ bbbb\n"               \
+	"mul	%1, %|r1, %|r2			@ aaaa * BBBB\n"        \
+	"mul	%|r2, %|r0, %|r2		@ AAAA * BBBB\n"        \
+	"mul	%|r1, %0, %|r1			@ aaaa * bbbb\n"        \
+	"mul	%0, %|r0, %0			@ AAAA * bbbb\n"        \
+	"adds	%|r0, %1, %0			@ central sum\n"        \
+	"addcs	%|r2, %|r2, #65536\n"                                   \
+	"adds	%1, %|r1, %|r0, lsl #16\n"                              \
+	"adc	%0, %|r2, %|r0, lsr #16"                                \
+	   : "=&r" ((USItype)(xh)),                                     \
+	     "=r" ((USItype)(xl))                                       \
+	   : "r" ((USItype)(a)),                                        \
+	     "r" ((USItype)(b))                                         \
+	   : "r0", "r1", "r2")
+#else
+#define umul_ppmm(xh, xl, a, b)                                         \
+  __asm__ ("%@ Inlined umul_ppmm\n"                                     \
+	   "umull %r1, %r0, %r2, %r3"                                   \
+		   : "=&r" ((USItype)(xh)),                             \
+		     "=r" ((USItype)(xl))                               \
+		   : "r" ((USItype)(a)),                                \
+		     "r" ((USItype)(b))                                 \
+		   : "r0", "r1")
+#endif
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+/***************************************
+ **************  CLIPPER  **************
+ ***************************************/
+#if defined (__clipper__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+  __asm__ ("mulwux %2,%0"                                               \
+	   : "=r" (__xx.__ll)                                           \
+	   : "%0" ((USItype)(u)),                                       \
+	     "r" ((USItype)(v)));                                       \
+  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define smul_ppmm(w1, w0, u, v) \
+  ({union {DItype __ll; 						\
+	   struct {SItype __l, __h;} __i;				\
+	  } __xx;							\
+  __asm__ ("mulwx %2,%0"                                                \
+	   : "=r" (__xx.__ll)                                           \
+	   : "%0" ((SItype)(u)),                                        \
+	     "r" ((SItype)(v)));                                        \
+  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w;							\
+    __asm__ ("mulwux %2,%0"                                             \
+	     : "=r" (__w)                                               \
+	     : "%0" ((USItype)(u)),                                     \
+	       "r" ((USItype)(v)));                                     \
+    __w; })
+#endif /* __clipper__ */
+
+
+/***************************************
+ **************  GMICRO  ***************
+ ***************************************/
+#if defined (__gmicro__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add.w %5,%1\n"                                              \
+	   "addx %3,%0"                                                 \
+	   : "=g" ((USItype)(sh)),                                      \
+	     "=&g" ((USItype)(sl))                                      \
+	   : "%0" ((USItype)(ah)),                                      \
+	     "g" ((USItype)(bh)),                                       \
+	     "%1" ((USItype)(al)),                                      \
+	     "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub.w %5,%1\n"                                              \
+	   "subx %3,%0"                                                 \
+	   : "=g" ((USItype)(sh)),                                      \
+	     "=&g" ((USItype)(sl))                                      \
+	   : "0" ((USItype)(ah)),                                       \
+	     "g" ((USItype)(bh)),                                       \
+	     "1" ((USItype)(al)),                                       \
+	     "g" ((USItype)(bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+  __asm__ ("mulx %3,%0,%1"                                              \
+	   : "=g" ((USItype)(ph)),                                      \
+	     "=r" ((USItype)(pl))                                       \
+	   : "%0" ((USItype)(m0)),                                      \
+	     "g" ((USItype)(m1)))
+#define udiv_qrnnd(q, r, nh, nl, d) \
+  __asm__ ("divx %4,%0,%1"                                              \
+	   : "=g" ((USItype)(q)),                                       \
+	     "=r" ((USItype)(r))                                        \
+	   : "1" ((USItype)(nh)),                                       \
+	     "0" ((USItype)(nl)),                                       \
+	     "g" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+  __asm__ ("bsch/1 %1,%0"                                               \
+	   : "=g" (count)                                               \
+	   : "g" ((USItype)(x)),                                        \
+	     "0" ((USItype)0))
+#endif
+
+
+/***************************************
+ **************  HPPA  *****************
+ ***************************************/
+#if defined (__hppa) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("	add %4,%5,%1\n"                                             \
+ 	   "	addc %2,%3,%0"                                              \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%rM" ((USItype)(ah)),                                     \
+	     "rM" ((USItype)(bh)),                                      \
+	     "%rM" ((USItype)(al)),                                     \
+	     "rM" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("	sub %4,%5,%1\n"                                             \
+	   "	subb %2,%3,%0"                                              \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "rM" ((USItype)(ah)),                                      \
+	     "rM" ((USItype)(bh)),                                      \
+	     "rM" ((USItype)(al)),                                      \
+	     "rM" ((USItype)(bl)))
+#if defined (_PA_RISC1_1)
+#define umul_ppmm(wh, wl, u, v) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __xx;							\
+    __asm__ ("	xmpyu %1,%2,%0"                                           \
+	     : "=*f" (__xx.__ll)                                        \
+	     : "*f" ((USItype)(u)),                                     \
+	       "*f" ((USItype)(v)));                                    \
+    (wh) = __xx.__i.__h;						\
+    (wl) = __xx.__i.__l;						\
+  } while (0)
+#define UMUL_TIME 8
+#define UDIV_TIME 60
+#else
+#define UMUL_TIME 40
+#define UDIV_TIME 80
+#endif
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do { USItype __r;							\
+    (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); 			\
+    (r) = __r;								\
+  } while (0)
+extern USItype __udiv_qrnnd ();
+#endif /* LONGLONG_STANDALONE */
+#define count_leading_zeros(count, x) \
+  do {								       \
+    USItype __tmp;						       \
+    __asm__ (				                               \
+       "	ldi             1,%0                                       \n" \
+       "	extru,= 	%1,15,16,%%r0  ; Bits 31..16 zero?         \n" \
+       "	extru,tr	%1,15,16,%1    ; No.  Shift down, skip add.\n" \
+       "	ldo		16(%0),%0      ; Yes.	Perform add.       \n" \
+       "	extru,= 	%1,23,8,%%r0   ; Bits 15..8 zero?          \n" \
+       "	extru,tr	%1,23,8,%1     ; No.  Shift down, skip add.\n" \
+       "	ldo		8(%0),%0       ; Yes.	Perform add.       \n" \
+       "	extru,= 	%1,27,4,%%r0   ; Bits 7..4 zero?           \n" \
+       "	extru,tr	%1,27,4,%1     ; No.  Shift down, skip add.\n" \
+       "	ldo		4(%0),%0       ; Yes.	Perform add.       \n" \
+       "	extru,= 	%1,29,2,%%r0   ; Bits 3..2 zero?           \n" \
+       "	extru,tr	%1,29,2,%1     ; No.  Shift down, skip add.\n" \
+       "	ldo		2(%0),%0       ; Yes.	Perform add.       \n" \
+       "	extru		%1,30,1,%1     ; Extract bit 1.            \n" \
+       "	sub		%0,%1,%0       ; Subtract it.              "   \
+       : "=r" (count), "=r" (__tmp) : "1" (x));                        \
+  } while (0)
+#endif /* hppa */
+
+
+/***************************************
+ **************  I370  *****************
+ ***************************************/
+#if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
+#define umul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __xx;							\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mr %0,%3"                                                 \
+	     : "=r" (__xx.__i.__h),                                     \
+	       "=r" (__xx.__i.__l)                                      \
+	     : "%1" (__m0),                                             \
+	       "r" (__m1));                                             \
+    (xh) = __xx.__i.__h; (xl) = __xx.__i.__l;				\
+    (xh) += ((((SItype) __m0 >> 31) & __m1)				\
+	     + (((SItype) __m1 >> 31) & __m0)); 			\
+  } while (0)
+#define smul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    union {DItype __ll; 						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __xx;							\
+    __asm__ ("mr %0,%3"                                                 \
+	     : "=r" (__xx.__i.__h),                                     \
+	       "=r" (__xx.__i.__l)                                      \
+	     : "%1" (m0),                                               \
+	       "r" (m1));                                               \
+    (xh) = __xx.__i.__h; (xl) = __xx.__i.__l;				\
+  } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  do {									\
+    union {DItype __ll; 						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __xx;							\
+    __xx.__i.__h = n1; __xx.__i.__l = n0;				\
+    __asm__ ("dr %0,%2"                                                 \
+	     : "=r" (__xx.__ll)                                         \
+	     : "0" (__xx.__ll), "r" (d));                               \
+    (q) = __xx.__i.__l; (r) = __xx.__i.__h;				\
+  } while (0)
+#endif
+
+
+/***************************************
+ **************  I386  *****************
+ ***************************************/
+#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addl %5,%1\n"                                               \
+	   "adcl %3,%0"                                                 \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%0" ((USItype)(ah)),                                      \
+	     "g" ((USItype)(bh)),                                       \
+	     "%1" ((USItype)(al)),                                      \
+	     "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subl %5,%1\n"                                               \
+	   "sbbl %3,%0"                                                 \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "0" ((USItype)(ah)),                                       \
+	     "g" ((USItype)(bh)),                                       \
+	     "1" ((USItype)(al)),                                       \
+	     "g" ((USItype)(bl)))
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mull %3"                                                    \
+	   : "=a" ((USItype)(w0)),                                      \
+	     "=d" ((USItype)(w1))                                       \
+	   : "%0" ((USItype)(u)),                                       \
+	     "rm" ((USItype)(v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("divl %4"                                                    \
+	   : "=a" ((USItype)(q)),                                       \
+	     "=d" ((USItype)(r))                                        \
+	   : "0" ((USItype)(n0)),                                       \
+	     "1" ((USItype)(n1)),                                       \
+	     "rm" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __cbtmp;							\
+    __asm__ ("bsrl %1,%0"                                               \
+	     : "=r" (__cbtmp) : "rm" ((USItype)(x)));                   \
+    (count) = __cbtmp ^ 31;						\
+  } while (0)
+#define count_trailing_zeros(count, x) \
+  __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
+#ifndef UMUL_TIME
+#define UMUL_TIME 40
+#endif
+#ifndef UDIV_TIME
+#define UDIV_TIME 40
+#endif
+#endif /* 80x86 */
+
+
+/***************************************
+ **************  I860  *****************
+ ***************************************/
+#if defined (__i860__) && W_TYPE_SIZE == 32
+#define rshift_rhlc(r,h,l,c) \
+  __asm__ ("shr %3,r0,r0\n"  \
+           "shrd %1,%2,%0"   \
+	   : "=r" (r) : "r" (h), "r" (l), "rn" (c))
+#endif /* i860 */
+
+/***************************************
+ **************  I960  *****************
+ ***************************************/
+#if defined (__i960__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("cmpo 1,0\n"      \
+           "addc %5,%4,%1\n" \
+           "addc %3,%2,%0"   \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%dI" ((USItype)(ah)),                                     \
+	     "dI" ((USItype)(bh)),                                      \
+	     "%dI" ((USItype)(al)),                                     \
+	     "dI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("cmpo 0,0\n"      \
+           "subc %5,%4,%1\n" \
+           "subc %3,%2,%0"   \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "dI" ((USItype)(ah)),                                      \
+	     "dI" ((USItype)(bh)),                                      \
+	     "dI" ((USItype)(al)),                                      \
+	     "dI" ((USItype)(bl)))
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+  __asm__ ("emul        %2,%1,%0"                                       \
+	   : "=d" (__xx.__ll)                                           \
+	   : "%dI" ((USItype)(u)),                                      \
+	     "dI" ((USItype)(v)));                                      \
+  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w;							\
+    __asm__ ("emul      %2,%1,%0"                                       \
+	     : "=d" (__w)                                               \
+	     : "%dI" ((USItype)(u)),                                    \
+	       "dI" ((USItype)(v)));                                    \
+    __w; })
+#define udiv_qrnnd(q, r, nh, nl, d) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __nn, __rq;							\
+    __nn.__i.__h = (nh); __nn.__i.__l = (nl);				\
+    __asm__ ("ediv %d,%n,%0"                                            \
+	   : "=d" (__rq.__ll)                                           \
+	   : "dI" (__nn.__ll),                                          \
+	     "dI" ((USItype)(d)));                                      \
+    (r) = __rq.__i.__l; (q) = __rq.__i.__h;				\
+  } while (0)
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __cbtmp;							\
+    __asm__ ("scanbit %1,%0"                                            \
+	     : "=r" (__cbtmp)                                           \
+	     : "r" ((USItype)(x)));                                     \
+    (count) = __cbtmp ^ 31;						\
+  } while (0)
+#define COUNT_LEADING_ZEROS_0 (-32) /* sic */
+#if defined (__i960mx)		/* what is the proper symbol to test??? */
+#define rshift_rhlc(r,h,l,c) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __nn;							\
+    __nn.__i.__h = (h); __nn.__i.__l = (l);				\
+    __asm__ ("shre %2,%1,%0"                                            \
+	     : "=d" (r) : "dI" (__nn.__ll), "dI" (c));                  \
+  } while (0)
+#endif /* i960mx */
+#endif /* i960 */
+
+
+/***************************************
+ **************  68000	****************
+ ***************************************/
+#if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add%.l %5,%1\n"                                             \
+	   "addx%.l %3,%0"                                              \
+	   : "=d" ((USItype)(sh)),                                      \
+	     "=&d" ((USItype)(sl))                                      \
+	   : "%0" ((USItype)(ah)),                                      \
+	     "d" ((USItype)(bh)),                                       \
+	     "%1" ((USItype)(al)),                                      \
+	     "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub%.l %5,%1\n"                                             \
+	   "subx%.l %3,%0"                                              \
+	   : "=d" ((USItype)(sh)),                                      \
+	     "=&d" ((USItype)(sl))                                      \
+	   : "0" ((USItype)(ah)),                                       \
+	     "d" ((USItype)(bh)),                                       \
+	     "1" ((USItype)(al)),                                       \
+	     "g" ((USItype)(bl)))
+#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mulu%.l %3,%1:%0"                                           \
+	   : "=d" ((USItype)(w0)),                                      \
+	     "=d" ((USItype)(w1))                                       \
+	   : "%0" ((USItype)(u)),                                       \
+	     "dmi" ((USItype)(v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("divu%.l %4,%1:%0"                                           \
+	   : "=d" ((USItype)(q)),                                       \
+	     "=d" ((USItype)(r))                                        \
+	   : "0" ((USItype)(n0)),                                       \
+	     "1" ((USItype)(n1)),                                       \
+	     "dmi" ((USItype)(d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("divs%.l %4,%1:%0"                                           \
+	   : "=d" ((USItype)(q)),                                       \
+	     "=d" ((USItype)(r))                                        \
+	   : "0" ((USItype)(n0)),                                       \
+	     "1" ((USItype)(n1)),                                       \
+	     "dmi" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+  __asm__ ("bfffo %1{%b2:%b2},%0"                                       \
+	   : "=d" ((USItype)(count))                                    \
+	   : "od" ((USItype)(x)), "n" (0))
+#define COUNT_LEADING_ZEROS_0 32
+#else /* not mc68020 */
+#define umul_ppmm(xh, xl, a, b) \
+  do { USItype __umul_tmp1, __umul_tmp2;			  \
+	__asm__ ("| Inlined umul_ppmm                         \n" \
+ "        move%.l %5,%3                                       \n" \
+ "        move%.l %2,%0                                       \n" \
+ "        move%.w %3,%1                                       \n" \
+ "        swap	%3                                            \n" \
+ "        swap	%0                                            \n" \
+ "        mulu	%2,%1                                         \n" \
+ "        mulu	%3,%0                                         \n" \
+ "        mulu	%2,%3                                         \n" \
+ "        swap	%2                                            \n" \
+ "        mulu	%5,%2                                         \n" \
+ "        add%.l	%3,%2                                 \n" \
+ "        jcc	1f                                            \n" \
+ "        add%.l	%#0x10000,%0                          \n" \
+ "1:	move%.l %2,%3                                         \n" \
+ "        clr%.w	%2                                    \n" \
+ "        swap	%2                                            \n" \
+ "        swap	%3                                            \n" \
+ "        clr%.w	%3                                    \n" \
+ "        add%.l	%3,%1                                 \n" \
+ "        addx%.l %2,%0                                       \n" \
+ "        | End inlined umul_ppmm"                                \
+	      : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)),     \
+		"=d" (__umul_tmp1), "=&d" (__umul_tmp2)           \
+	      : "%2" ((USItype)(a)), "d" ((USItype)(b)));         \
+  } while (0)
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#endif /* not mc68020 */
+#endif /* mc68000 */
+
+
+/***************************************
+ **************  88000	****************
+ ***************************************/
+#if defined (__m88000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addu.co %1,%r4,%r5\n"                                       \
+	   "addu.ci %0,%r2,%r3"                                         \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%rJ" ((USItype)(ah)),                                     \
+	     "rJ" ((USItype)(bh)),                                      \
+	     "%rJ" ((USItype)(al)),                                     \
+	     "rJ" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subu.co %1,%r4,%r5\n"                                       \
+	   "subu.ci %0,%r2,%r3"                                         \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "rJ" ((USItype)(ah)),                                      \
+	     "rJ" ((USItype)(bh)),                                      \
+	     "rJ" ((USItype)(al)),                                      \
+	     "rJ" ((USItype)(bl)))
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __cbtmp;							\
+    __asm__ ("ff1 %0,%1"                                                \
+	     : "=r" (__cbtmp)                                           \
+	     : "r" ((USItype)(x)));                                     \
+    (count) = __cbtmp ^ 31;						\
+  } while (0)
+#define COUNT_LEADING_ZEROS_0 63 /* sic */
+#if defined (__m88110__)
+#define umul_ppmm(wh, wl, u, v) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __x;							\
+    __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v));   \
+    (wh) = __x.__i.__h; 						\
+    (wl) = __x.__i.__l; 						\
+  } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __x, __q;							\
+  __x.__i.__h = (n1); __x.__i.__l = (n0);				\
+  __asm__ ("divu.d %0,%1,%2"                                            \
+	   : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d));                \
+  (r) = (n0) - __q.__i.__l * (d); (q) = __q.__i.__l; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __m88110__ */
+#endif /* __m88000__ */
+
+/***************************************
+ **************  MIPS  *****************
+ ***************************************/
+#if defined (__mips__) && W_TYPE_SIZE == 32
+#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 7)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("multu %2,%3"                                                \
+	   : "=l" ((USItype)(w0)),                                      \
+	     "=h" ((USItype)(w1))                                       \
+	   : "d" ((USItype)(u)),                                        \
+	     "d" ((USItype)(v)))
+#else
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("multu %2,%3 \n" \
+	   "mflo %0 \n"     \
+	   "mfhi %1"                                                        \
+	   : "=d" ((USItype)(w0)),                                      \
+	     "=d" ((USItype)(w1))                                       \
+	   : "d" ((USItype)(u)),                                        \
+	     "d" ((USItype)(v)))
+#endif
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+#endif /* __mips__ */
+
+/***************************************
+ **************  MIPS/64  **************
+ ***************************************/
+#if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64
+#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 7)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("dmultu %2,%3"                                               \
+	   : "=l" ((UDItype)(w0)),                                      \
+	     "=h" ((UDItype)(w1))                                       \
+	   : "d" ((UDItype)(u)),                                        \
+	     "d" ((UDItype)(v)))
+#else
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("dmultu %2,%3 \n"    \
+	   "mflo %0 \n"         \
+	   "mfhi %1"                                                        \
+	   : "=d" ((UDItype)(w0)),                                      \
+	     "=d" ((UDItype)(w1))                                       \
+	   : "d" ((UDItype)(u)),                                        \
+	     "d" ((UDItype)(v)))
+#endif
+#define UMUL_TIME 20
+#define UDIV_TIME 140
+#endif /* __mips >= 3 */
+
+
+/***************************************
+ **************  32000	****************
+ ***************************************/
+#if defined (__ns32000__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+  __asm__ ("meid %2,%0"                                                 \
+	   : "=g" (__xx.__ll)                                           \
+	   : "%0" ((USItype)(u)),                                       \
+	     "g" ((USItype)(v)));                                       \
+  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w;							\
+    __asm__ ("meid %2,%0"                                               \
+	     : "=g" (__w)                                               \
+	     : "%0" ((USItype)(u)),                                     \
+	       "g" ((USItype)(v)));                                     \
+    __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+  __xx.__i.__h = (n1); __xx.__i.__l = (n0);				\
+  __asm__ ("deid %2,%0"                                                 \
+	   : "=g" (__xx.__ll)                                           \
+	   : "0" (__xx.__ll),                                           \
+	     "g" ((USItype)(d)));                                       \
+  (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
+#define count_trailing_zeros(count,x) \
+  do {									\
+    __asm__ ("ffsd      %2,%0"                                          \
+	     : "=r" ((USItype) (count))                                 \
+	     : "0" ((USItype) 0),                                       \
+	       "r" ((USItype) (x)));                                    \
+  } while (0)
+#endif /* __ns32000__ */
+
+
+/***************************************
+ **************  PPC  ******************
+ ***************************************/
+#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  do {									\
+    if (__builtin_constant_p (bh) && (bh) == 0) 			\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"           \
+	     : "=r" ((USItype)(sh)),                                    \
+	       "=&r" ((USItype)(sl))                                    \
+	     : "%r" ((USItype)(ah)),                                    \
+	       "%r" ((USItype)(al)),                                    \
+	       "rI" ((USItype)(bl)));                                   \
+    else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0)		\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"           \
+	     : "=r" ((USItype)(sh)),                                    \
+	       "=&r" ((USItype)(sl))                                    \
+	     : "%r" ((USItype)(ah)),                                    \
+	       "%r" ((USItype)(al)),                                    \
+	       "rI" ((USItype)(bl)));                                   \
+    else								\
+      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"          \
+	     : "=r" ((USItype)(sh)),                                    \
+	       "=&r" ((USItype)(sl))                                    \
+	     : "%r" ((USItype)(ah)),                                    \
+	       "r" ((USItype)(bh)),                                     \
+	       "%r" ((USItype)(al)),                                    \
+	       "rI" ((USItype)(bl)));                                   \
+  } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do {									\
+    if (__builtin_constant_p (ah) && (ah) == 0) 			\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"       \
+	       : "=r" ((USItype)(sh)),                                  \
+		 "=&r" ((USItype)(sl))                                  \
+	       : "r" ((USItype)(bh)),                                   \
+		 "rI" ((USItype)(al)),                                  \
+		 "r" ((USItype)(bl)));                                  \
+    else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"       \
+	       : "=r" ((USItype)(sh)),                                  \
+		 "=&r" ((USItype)(sl))                                  \
+	       : "r" ((USItype)(bh)),                                   \
+		 "rI" ((USItype)(al)),                                  \
+		 "r" ((USItype)(bl)));                                  \
+    else if (__builtin_constant_p (bh) && (bh) == 0)			\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"         \
+	       : "=r" ((USItype)(sh)),                                  \
+		 "=&r" ((USItype)(sl))                                  \
+	       : "r" ((USItype)(ah)),                                   \
+		 "rI" ((USItype)(al)),                                  \
+		 "r" ((USItype)(bl)));                                  \
+    else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"         \
+	       : "=r" ((USItype)(sh)),                                  \
+		 "=&r" ((USItype)(sl))                                  \
+	       : "r" ((USItype)(ah)),                                   \
+		 "rI" ((USItype)(al)),                                  \
+		 "r" ((USItype)(bl)));                                  \
+    else								\
+      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"      \
+	       : "=r" ((USItype)(sh)),                                  \
+		 "=&r" ((USItype)(sl))                                  \
+	       : "r" ((USItype)(ah)),                                   \
+		 "r" ((USItype)(bh)),                                   \
+		 "rI" ((USItype)(al)),                                  \
+		 "r" ((USItype)(bl)));                                  \
+  } while (0)
+#define count_leading_zeros(count, x) \
+  __asm__ ("{cntlz|cntlzw} %0,%1"                                       \
+	   : "=r" ((USItype)(count))                                    \
+	   : "r" ((USItype)(x)))
+#define COUNT_LEADING_ZEROS_0 32
+#if defined (_ARCH_PPC)
+#define umul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhwu %0,%1,%2"                                          \
+	     : "=r" ((USItype) ph)                                      \
+	     : "%r" (__m0),                                             \
+	       "r" (__m1));                                             \
+    (pl) = __m0 * __m1; 						\
+  } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    SItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhw %0,%1,%2"                                           \
+	     : "=r" ((SItype) ph)                                       \
+	     : "%r" (__m0),                                             \
+	       "r" (__m1));                                             \
+    (pl) = __m0 * __m1; 						\
+  } while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#else
+#define umul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mul %0,%2,%3"                                             \
+	     : "=r" ((USItype)(xh)),                                    \
+	       "=q" ((USItype)(xl))                                     \
+	     : "r" (__m0),                                              \
+	       "r" (__m1));                                             \
+    (xh) += ((((SItype) __m0 >> 31) & __m1)				\
+	     + (((SItype) __m1 >> 31) & __m0)); 			\
+  } while (0)
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+  __asm__ ("mul %0,%2,%3"                                               \
+	   : "=r" ((SItype)(xh)),                                       \
+	     "=q" ((SItype)(xl))                                        \
+	   : "r" (m0),                                                  \
+	     "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+  __asm__ ("div %0,%2,%4"                                               \
+	   : "=r" ((SItype)(q)), "=q" ((SItype)(r))                     \
+	   : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
+#define UDIV_TIME 100
+#endif
+#endif /* Power architecture variants.	*/
+
+
+/***************************************
+ **************  PYR  ******************
+ ***************************************/
+#if defined (__pyr__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addw        %5,%1 \n" \
+	   "addwc	%3,%0"                                          \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%0" ((USItype)(ah)),                                      \
+	     "g" ((USItype)(bh)),                                       \
+	     "%1" ((USItype)(al)),                                      \
+	     "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subw        %5,%1 \n" \
+	   "subwb	%3,%0"                                          \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "0" ((USItype)(ah)),                                       \
+	     "g" ((USItype)(bh)),                                       \
+	     "1" ((USItype)(al)),                                       \
+	     "g" ((USItype)(bl)))
+/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP.  */
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __xx;							\
+  __asm__ ("movw %1,%R0 \n" \
+	   "uemul %2,%0"                                                \
+	   : "=&r" (__xx.__ll)                                          \
+	   : "g" ((USItype) (u)),                                       \
+	     "g" ((USItype)(v)));                                       \
+  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#endif /* __pyr__ */
+
+
+/***************************************
+ **************  RT/ROMP  **************
+ ***************************************/
+#if defined (__ibm032__) && W_TYPE_SIZE == 32	/* RT/ROMP */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("a %1,%5 \n" \
+	   "ae %0,%3"                                                   \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%0" ((USItype)(ah)),                                      \
+	     "r" ((USItype)(bh)),                                       \
+	     "%1" ((USItype)(al)),                                      \
+	     "r" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("s %1,%5\n" \
+	   "se %0,%3"                                                   \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "0" ((USItype)(ah)),                                       \
+	     "r" ((USItype)(bh)),                                       \
+	     "1" ((USItype)(al)),                                       \
+	     "r" ((USItype)(bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ (								\
+       "s       r2,r2    \n" \
+       "mts	r10,%2   \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "m	r2,%3    \n" \
+       "cas	%0,r2,r0 \n" \
+       "mfs	r10,%1"                                                 \
+	     : "=r" ((USItype)(ph)),                                    \
+	       "=r" ((USItype)(pl))                                     \
+	     : "%r" (__m0),                                             \
+		"r" (__m1)                                              \
+	     : "r2");                                                   \
+    (ph) += ((((SItype) __m0 >> 31) & __m1)				\
+	     + (((SItype) __m1 >> 31) & __m0)); 			\
+  } while (0)
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+#define count_leading_zeros(count, x) \
+  do {									\
+    if ((x) >= 0x10000) 						\
+      __asm__ ("clz     %0,%1"                                          \
+	       : "=r" ((USItype)(count))                                \
+	       : "r" ((USItype)(x) >> 16));                             \
+    else								\
+      { 								\
+	__asm__ ("clz   %0,%1"                                          \
+		 : "=r" ((USItype)(count))                              \
+		 : "r" ((USItype)(x)));                                 \
+	(count) += 16;							\
+      } 								\
+  } while (0)
+#endif /* RT/ROMP */
+
+
+/***************************************
+ ************  SH2/SH3/SH4  ************
+ ***************************************/
+#if (defined (__sh2__) || defined(__sh3__) || defined(__SH4__) ) \
+    && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ (								\
+        "dmulu.l %2,%3\n"  \
+	"sts	macl,%1\n" \
+	"sts	mach,%0"                                                \
+	   : "=r" ((USItype)(w1)),                                      \
+	     "=r" ((USItype)(w0))                                       \
+	   : "r" ((USItype)(u)),                                        \
+	     "r" ((USItype)(v))                                         \
+	   : "macl", "mach")
+#define UMUL_TIME 5
+#endif
+
+/***************************************
+ **************  SPARC	****************
+ ***************************************/
+#if defined (__sparc__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addcc %r4,%5,%1\n" \
+	   "addx %r2,%3,%0"                                             \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "%rJ" ((USItype)(ah)),                                     \
+	     "rI" ((USItype)(bh)),                                      \
+	     "%rJ" ((USItype)(al)),                                     \
+	     "rI" ((USItype)(bl))                                       \
+	   __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subcc %r4,%5,%1\n" \
+	   "subx %r2,%3,%0"                                             \
+	   : "=r" ((USItype)(sh)),                                      \
+	     "=&r" ((USItype)(sl))                                      \
+	   : "rJ" ((USItype)(ah)),                                      \
+	     "rI" ((USItype)(bh)),                                      \
+	     "rJ" ((USItype)(al)),                                      \
+	     "rI" ((USItype)(bl))                                       \
+	   __CLOBBER_CC)
+#if defined (__sparc_v8__)
+/* Don't match the immediate range because: 1) it is not often useful,
+   and 2) the 'I' constraint thinks of the range as a 13 bit signed
+   interval, while we want to match a 13 bit interval, sign extended
+   to 32 bits, but INTERPRETED AS UNSIGNED.  */
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("umul %2,%3,%1;rd %%y,%0"                                    \
+	   : "=r" ((USItype)(w1)),                                      \
+	     "=r" ((USItype)(w0))                                       \
+	   : "r" ((USItype)(u)),                                        \
+	     "r" ((USItype)(v)))
+#define UMUL_TIME 5
+#ifndef SUPERSPARC	/* SuperSPARC's udiv only handles 53 bit dividends */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do {									\
+    USItype __q;							\
+    __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0"                     \
+	     : "=r" ((USItype)(__q))                                    \
+	     : "r" ((USItype)(n1)),                                     \
+	       "r" ((USItype)(n0)),                                     \
+	       "r" ((USItype)(d)));                                     \
+    (r) = (n0) - __q * (d);						\
+    (q) = __q;								\
+  } while (0)
+#define UDIV_TIME 25
+#endif /* SUPERSPARC */
+#else /* ! __sparc_v8__ */
+#if defined (__sparclite__)
+/* This has hardware multiply but not divide.  It also has two additional
+   instructions: scan (ffs from the high bit) and divscc.  */
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("umul %2,%3,%1;rd %%y,%0"                                    \
+	   : "=r" ((USItype)(w1)),                                      \
+	     "=r" ((USItype)(w0))                                       \
+	   : "r" ((USItype)(u)),                                        \
+	     "r" ((USItype)(v)))
+#define UMUL_TIME 5
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("! Inlined udiv_qrnnd                                     \n" \
+ "        wr	%%g0,%2,%%y	! Not a delayed write for sparclite  \n" \
+ "        tst	%%g0                                                 \n" \
+ "        divscc	%3,%4,%%g1                                   \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%%g1                                 \n" \
+ "        divscc	%%g1,%4,%0                                   \n" \
+ "        rd	%%y,%1                                               \n" \
+ "        bl,a 1f                                                    \n" \
+ "        add	%1,%4,%1                                             \n" \
+ "1:	! End of inline udiv_qrnnd"                                     \
+	   : "=r" ((USItype)(q)),                                       \
+	     "=r" ((USItype)(r))                                        \
+	   : "r" ((USItype)(n1)),                                       \
+	     "r" ((USItype)(n0)),                                       \
+	     "rI" ((USItype)(d))                                        \
+	   : "%g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#define count_leading_zeros(count, x) \
+  __asm__ ("scan %1,0,%0"                                               \
+	   : "=r" ((USItype)(count))                                    \
+	   : "r" ((USItype)(x)))
+/* Early sparclites return 63 for an argument of 0, but they warn that future
+   implementations might change this.  Therefore, leave COUNT_LEADING_ZEROS_0
+   undefined.  */
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+/* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd.  */
+#ifndef umul_ppmm
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("! Inlined umul_ppmm                                        \n" \
+ "        wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr  \n" \
+ "        sra	%3,31,%%g2	! Don't move this insn                 \n" \
+ "        and	%2,%%g2,%%g2	! Don't move this insn                 \n" \
+ "        andcc	%%g0,0,%%g1	! Don't move this insn                 \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,%3,%%g1                                   \n" \
+ "        mulscc	%%g1,0,%%g1                                    \n" \
+ "        add	%%g1,%%g2,%0                                           \n" \
+ "        rd	%%y,%1"                                                 \
+	   : "=r" ((USItype)(w1)),                                      \
+	     "=r" ((USItype)(w0))                                       \
+	   : "%rI" ((USItype)(u)),                                      \
+	     "r" ((USItype)(v))                                         \
+	   : "%g1", "%g2" __AND_CLOBBER_CC)
+#define UMUL_TIME 39		/* 39 instructions */
+#endif
+#ifndef udiv_qrnnd
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do { USItype __r;							\
+    (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); 			\
+    (r) = __r;								\
+  } while (0)
+extern USItype __udiv_qrnnd ();
+#define UDIV_TIME 140
+#endif /* LONGLONG_STANDALONE */
+#endif /* udiv_qrnnd */
+#endif /* __sparc__ */
+
+
+/***************************************
+ **************  VAX  ******************
+ ***************************************/
+#if defined (__vax__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addl2 %5,%1\n" \
+	   "adwc %3,%0"                                                 \
+	   : "=g" ((USItype)(sh)),                                      \
+	     "=&g" ((USItype)(sl))                                      \
+	   : "%0" ((USItype)(ah)),                                      \
+	     "g" ((USItype)(bh)),                                       \
+	     "%1" ((USItype)(al)),                                      \
+	     "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subl2 %5,%1\n" \
+	   "sbwc %3,%0"                                                 \
+	   : "=g" ((USItype)(sh)),                                      \
+	     "=&g" ((USItype)(sl))                                      \
+	   : "0" ((USItype)(ah)),                                       \
+	     "g" ((USItype)(bh)),                                       \
+	     "1" ((USItype)(al)),                                       \
+	     "g" ((USItype)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("emul %1,%2,$0,%0"                                         \
+	     : "=g" (__xx.__ll)                                         \
+	     : "g" (__m0),                                              \
+	       "g" (__m1));                                             \
+    (xh) = __xx.__i.__h; (xl) = __xx.__i.__l;				\
+    (xh) += ((((SItype) __m0 >> 31) & __m1)				\
+	     + (((SItype) __m1 >> 31) & __m0)); 			\
+  } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  do {									\
+    union {DItype __ll; 						\
+	   struct {SItype __l, __h;} __i;				\
+	  } __xx;							\
+    __xx.__i.__h = n1; __xx.__i.__l = n0;				\
+    __asm__ ("ediv %3,%2,%0,%1"                                         \
+	     : "=g" (q), "=g" (r)                                       \
+	     : "g" (__xx.__ll), "g" (d));                               \
+  } while (0)
+#endif /* __vax__ */
+
+
+/***************************************
+ **************  Z8000	****************
+ ***************************************/
+#if defined (__z8000__) && W_TYPE_SIZE == 16
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add %H1,%H5\n\tadc  %H0,%H3"                                \
+	   : "=r" ((unsigned int)(sh)),                                 \
+	     "=&r" ((unsigned int)(sl))                                 \
+	   : "%0" ((unsigned int)(ah)),                                 \
+	     "r" ((unsigned int)(bh)),                                  \
+	     "%1" ((unsigned int)(al)),                                 \
+	     "rQR" ((unsigned int)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub %H1,%H5\n\tsbc  %H0,%H3"                                \
+	   : "=r" ((unsigned int)(sh)),                                 \
+	     "=&r" ((unsigned int)(sl))                                 \
+	   : "0" ((unsigned int)(ah)),                                  \
+	     "r" ((unsigned int)(bh)),                                  \
+	     "1" ((unsigned int)(al)),                                  \
+	     "rQR" ((unsigned int)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    union {long int __ll;						\
+	   struct {unsigned int __h, __l;} __i; 			\
+	  } __xx;							\
+    unsigned int __m0 = (m0), __m1 = (m1);				\
+    __asm__ ("mult      %S0,%H3"                                        \
+	     : "=r" (__xx.__i.__h),                                     \
+	       "=r" (__xx.__i.__l)                                      \
+	     : "%1" (__m0),                                             \
+	       "rQR" (__m1));                                           \
+    (xh) = __xx.__i.__h; (xl) = __xx.__i.__l;				\
+    (xh) += ((((signed int) __m0 >> 15) & __m1) 			\
+	     + (((signed int) __m1 >> 15) & __m0));			\
+  } while (0)
+#endif /* __z8000__ */
+
+#endif /* __GNUC__ */
+
+
+/***************************************
+ ***********  Generic Versions	********
+ ***************************************/
+#if !defined (umul_ppmm) && defined (__umulsidi3)
+#define umul_ppmm(ph, pl, m0, m1) \
+  {									\
+    UDWtype __ll = __umulsidi3 (m0, m1);				\
+    ph = (UWtype) (__ll >> W_TYPE_SIZE);				\
+    pl = (UWtype) __ll; 						\
+  }
+#endif
+
+#if !defined (__umulsidi3)
+#define __umulsidi3(u, v) \
+  ({UWtype __hi, __lo;							\
+    umul_ppmm (__hi, __lo, u, v);					\
+    ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
+#endif
+
+/* If this machine has no inline assembler, use C macros.  */
+
+#if !defined (add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  do {									\
+    UWtype __x; 							\
+    __x = (al) + (bl);							\
+    (sh) = (ah) + (bh) + (__x < (al));					\
+    (sl) = __x; 							\
+  } while (0)
+#endif
+
+#if !defined (sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do {									\
+    UWtype __x; 							\
+    __x = (al) - (bl);							\
+    (sh) = (ah) - (bh) - (__x > (al));					\
+    (sl) = __x; 							\
+  } while (0)
+#endif
+
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) 					\
+  do {									\
+    UWtype __x0, __x1, __x2, __x3;					\
+    UHWtype __ul, __vl, __uh, __vh;					\
+    UWtype __u = (u), __v = (v);					\
+									\
+    __ul = __ll_lowpart (__u);						\
+    __uh = __ll_highpart (__u); 					\
+    __vl = __ll_lowpart (__v);						\
+    __vh = __ll_highpart (__v); 					\
+									\
+    __x0 = (UWtype) __ul * __vl;					\
+    __x1 = (UWtype) __ul * __vh;					\
+    __x2 = (UWtype) __uh * __vl;					\
+    __x3 = (UWtype) __uh * __vh;					\
+									\
+    __x1 += __ll_highpart (__x0);/* this can't give carry */            \
+    __x1 += __x2;		/* but this indeed can */		\
+    if (__x1 < __x2)		/* did we get it? */			\
+      __x3 += __ll_B;		/* yes, add it in the proper pos. */	\
+									\
+    (w1) = __x3 + __ll_highpart (__x1); 				\
+    (w0) = (__ll_lowpart (__x1) << W_TYPE_SIZE/2) + __ll_lowpart (__x0);\
+  } while (0)
+#endif
+
+#if !defined (smul_ppmm)
+#define smul_ppmm(w1, w0, u, v) 					\
+  do {									\
+    UWtype __w1;							\
+    UWtype __m0 = (u), __m1 = (v);					\
+    umul_ppmm (__w1, w0, __m0, __m1);					\
+    (w1) = __w1 - (-(__m0 >> (W_TYPE_SIZE - 1)) & __m1) 		\
+		- (-(__m1 >> (W_TYPE_SIZE - 1)) & __m0);		\
+  } while (0)
+#endif
+
+/* Define this unconditionally, so it can be used for debugging.  */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+  do {									\
+    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;			\
+    __d1 = __ll_highpart (d);						\
+    __d0 = __ll_lowpart (d);						\
+									\
+    __r1 = (n1) % __d1; 						\
+    __q1 = (n1) / __d1; 						\
+    __m = (UWtype) __q1 * __d0; 					\
+    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
+    if (__r1 < __m)							\
+      { 								\
+	__q1--, __r1 += (d);						\
+	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+	  if (__r1 < __m)						\
+	    __q1--, __r1 += (d);					\
+      } 								\
+    __r1 -= __m;							\
+									\
+    __r0 = __r1 % __d1; 						\
+    __q0 = __r1 / __d1; 						\
+    __m = (UWtype) __q0 * __d0; 					\
+    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
+    if (__r0 < __m)							\
+      { 								\
+	__q0--, __r0 += (d);						\
+	if (__r0 >= (d))						\
+	  if (__r0 < __m)						\
+	    __q0--, __r0 += (d);					\
+      } 								\
+    __r0 -= __m;							\
+									\
+    (q) = (UWtype) __q1 * __ll_B | __q0;				\
+    (r) = __r0; 							\
+  } while (0)
+
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
+#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+  do {									\
+    UWtype __r; 							\
+    (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); 			\
+    (r) = __r;								\
+  } while (0)
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
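+/* Note: the #undef below discards any architecture-specific
+ * count_leading_zeros defined above, so the portable C version that
+ * follows (and its lookup table) is used on every architecture. */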
+#undef count_leading_zeros
+#if !defined (count_leading_zeros)
+extern
+#ifdef __STDC__
+const
+#endif
+unsigned char __clz_tab[];
+#define MPI_INTERNAL_NEED_CLZ_TAB 1
+#define count_leading_zeros(count, x) \
+  do {									\
+    UWtype __xr = (x);							\
+    UWtype __a; 							\
+                         						\
+    if (W_TYPE_SIZE <= 32)						\
+      { 								\
+	__a = __xr < ((UWtype) 1 << 2*__BITS4)				\
+	  ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4)		\
+	  : (__xr < ((UWtype) 1 << 3*__BITS4) ?  2*__BITS4 : 3*__BITS4);\
+      } 								\
+    else								\
+      { 								\
+	for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8)			\
+	  if (((__xr >> __a) & 0xff) != 0)				\
+	    break;							\
+      } 								\
+									\
+    (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);		\
+  } while (0)
+/* This version gives a well-defined value for zero. */
+#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
+#endif
+
+#if !defined (count_trailing_zeros)
+/* Define count_trailing_zeros using count_leading_zeros.  The latter might be
+   defined in asm, but if it is not, the C version above is good enough.  */
+#define count_trailing_zeros(count, x) \
+  do {									\
+    UWtype __ctz_x = (x);						\
+    UWtype __ctz_c;							\
+    count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x);			\
+    (count) = W_TYPE_SIZE - 1 - __ctz_c;				\
+  } while (0)
+#endif
+
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
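The net effect of this header is that every architecture ends up with the
same primitives: add_ssaaaa/sub_ddmmss for two-word add and subtract with
carry, umul_ppmm for the double-width product, and udiv_qrnnd for dividing
a two-word numerator by one word. A minimal host-side sketch of the
umul_ppmm and udiv_qrnnd contracts in portable C99; the demo_* names are
illustrative only, not part of the patch:

	#include <stdint.h>
	#include <assert.h>

	/* umul_ppmm analogue: 32x32 -> 64, split into high and low words. */
	static void demo_umul_ppmm(uint32_t *w1, uint32_t *w0,
				   uint32_t u, uint32_t v)
	{
		uint64_t prod = (uint64_t)u * v;
		*w1 = (uint32_t)(prod >> 32);
		*w0 = (uint32_t)prod;
	}

	/* udiv_qrnnd analogue: (n1,n0) / d, assuming n1 < d as the macros
	 * do, so the quotient fits in a single word. */
	static void demo_udiv_qrnnd(uint32_t *q, uint32_t *r,
				    uint32_t n1, uint32_t n0, uint32_t d)
	{
		uint64_t n = ((uint64_t)n1 << 32) | n0;
		assert(n1 < d);
		*q = (uint32_t)(n / d);
		*r = (uint32_t)(n % d);
	}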
diff -Nru a/security/rsa/gnupg_mpi_memory.h b/security/rsa/gnupg_mpi_memory.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_memory.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,60 @@
+/* memory.h - memory allocation
+ *	Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GNUPG.
+ *
+ * GNUPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GNUPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#ifndef G10_MEMORY_H
+#define G10_MEMORY_H
+
+#include <linux/mm.h>
+#include <linux/slab.h>		/* kmalloc/kfree */
+#include <linux/string.h>	/* memcpy/memset */
+
+
+/* The *_clear variants really clear the memory here; note that N is
+ * evaluated twice. */
+#define m_alloc(n)		kmalloc(n, GFP_KERNEL)
+#define m_alloc_clear(n)	({ void *_p = kmalloc((n), GFP_KERNEL);	\
+				   if (_p) memset(_p, 0, (n)); _p; })
+#define m_alloc_secure(n)	kmalloc(n, GFP_KERNEL)
+#define m_alloc_secure_clear(n)	({ void *_p = kmalloc((n), GFP_KERNEL);	\
+				   if (_p) memset(_p, 0, (n)); _p; })
+#define m_free(n)		kfree(n)
+#define m_check(n)		/* nothing to do here */
+#define m_size(n)               sizeof(n)
+#define m_is_secure(n)          1
+
+/* &&&& realloc kernel hack, should check this: the size of the old
+ * allocation is not tracked, so the memcpy in krealloc may read past
+ * the end of PTR when the block grows. */
+#define m_realloc(n,m)		krealloc((n),(m))
+
+static inline void *
+krealloc(void *ptr, size_t size)
+{
+  void *tmp = NULL;
+  if (size) {
+    tmp = kmalloc(size,GFP_KERNEL);
+    if (tmp && ptr) memcpy(tmp,ptr,size);
+  }
+  kfree(ptr);
+  return tmp;
+}
+
+#define DBG_MEMORY    memory_debug_mode
+#define DBG_MEMSTAT   memory_stat_debug_mode
+
+#define EXTERN_UNLESS_MAIN_MODULE extern
+
+EXTERN_UNLESS_MAIN_MODULE int memory_debug_mode;
+EXTERN_UNLESS_MAIN_MODULE int memory_stat_debug_mode;
+
+
+#endif /*G10_MEMORY_H*/
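One way to make the reallocation above safe is to track each block's
length in a small header, so only the bytes that were actually allocated
get copied. A sketch under that assumption follows; sized_alloc and
sized_realloc are hypothetical names, not part of this patch:

	/* Hypothetical size-tracked allocator: the length lives in front
	 * of the user data, so realloc can copy exactly the old contents. */
	struct sized_block {
		size_t len;
		unsigned char data[0];
	};

	static inline void *sized_alloc(size_t size)
	{
		struct sized_block *b = kmalloc(sizeof(*b) + size, GFP_KERNEL);
		if (!b)
			return NULL;
		b->len = size;
		return b->data;
	}

	static inline void *sized_realloc(void *ptr, size_t size)
	{
		struct sized_block *old, *b;

		if (!ptr)
			return sized_alloc(size);
		old = container_of(ptr, struct sized_block, data);
		b = kmalloc(sizeof(*b) + size, GFP_KERNEL);
		if (b) {
			b->len = size;
			memcpy(b->data, old->data, min(old->len, size));
		}
		kfree(old);	/* like krealloc above, the old block is
				 * freed even on failure */
		return b ? b->data : NULL;
	}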
diff -Nru a/security/rsa/gnupg_mpi_mpi-add.c b/security/rsa/gnupg_mpi_mpi-add.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-add.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,243 @@
+/* mpi-add.c  -  MPI functions
+ *	Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *	Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_config.h"
+#include "gnupg_mpi_mpi-internal.h"
+
+/****************
+ * Add the unsigned integer V to the mpi-integer U and store the
+ * result in W. U and V may be the same.
+ */
+void
+mpi_add_ui(MPI w, MPI u, unsigned long v )
+{
+    mpi_ptr_t wp, up;
+    mpi_size_t usize, wsize;
+    int usign, wsign;
+
+    usize = u->nlimbs;
+    usign = u->sign;
+    wsign = 0;
+
+    /* If not space for W (and possible carry), increase space.  */
+    wsize = usize + 1;
+    if( w->alloced < wsize )
+	mpi_resize(w, wsize);
+
+    /* These must be after realloc (U may be the same as W).  */
+    up = u->d;
+    wp = w->d;
+
+    if( !usize ) {  /* simple */
+	wp[0] = v;
+	wsize = v? 1:0;
+    }
+    else if( !usign ) {  /* mpi is not negative */
+	mpi_limb_t cy;
+	cy = mpihelp_add_1(wp, up, usize, v);
+	wp[usize] = cy;
+	wsize = usize + cy;
+    }
+    else {  /* The signs are different.  Need exact comparison to determine
+	     * which operand to subtract from which.  */
+	if( usize == 1 && up[0] < v ) {
+	    wp[0] = v - up[0];
+	    wsize = 1;
+	}
+	else {
+	    mpihelp_sub_1(wp, up, usize, v);
+	    /* Size can decrease with at most one limb. */
+	    wsize = usize - (wp[usize-1]==0);
+	    wsign = 1;
+	}
+    }
+
+    w->nlimbs = wsize;
+    w->sign   = wsign;
+}
+
+
+void
+mpi_add(MPI w, MPI u, MPI v)
+{
+    mpi_ptr_t wp, up, vp;
+    mpi_size_t usize, vsize, wsize;
+    int usign, vsign, wsign;
+
+    if( u->nlimbs < v->nlimbs ) { /* Swap U and V. */
+	usize = v->nlimbs;
+	usign = v->sign;
+	vsize = u->nlimbs;
+	vsign = u->sign;
+	wsize = usize + 1;
+	RESIZE_IF_NEEDED(w, wsize);
+	/* These must be after realloc (u or v may be the same as w).  */
+	up    = v->d;
+	vp    = u->d;
+    }
+    else {
+	usize = u->nlimbs;
+	usign = u->sign;
+	vsize = v->nlimbs;
+	vsign = v->sign;
+	wsize = usize + 1;
+	RESIZE_IF_NEEDED(w, wsize);
+	/* These must be after realloc (u or v may be the same as w).  */
+	up    = u->d;
+	vp    = v->d;
+    }
+    wp = w->d;
+    wsign = 0;
+
+    if( !vsize ) {  /* simple */
+	MPN_COPY(wp, up, usize );
+	wsize = usize;
+	wsign = usign;
+    }
+    else if( usign != vsign ) { /* different sign */
+	/* This test is right since USIZE >= VSIZE */
+	if( usize != vsize ) {
+	    mpihelp_sub(wp, up, usize, vp, vsize);
+	    wsize = usize;
+	    MPN_NORMALIZE(wp, wsize);
+	    wsign = usign;
+	}
+	else if( mpihelp_cmp(up, vp, usize) < 0 ) {
+	    mpihelp_sub_n(wp, vp, up, usize);
+	    wsize = usize;
+	    MPN_NORMALIZE(wp, wsize);
+	    if( !usign )
+		wsign = 1;
+	}
+	else {
+	    mpihelp_sub_n(wp, up, vp, usize);
+	    wsize = usize;
+	    MPN_NORMALIZE(wp, wsize);
+	    if( usign )
+		wsign = 1;
+	}
+    }
+    else { /* U and V have same sign. Add them. */
+	mpi_limb_t cy = mpihelp_add(wp, up, usize, vp, vsize);
+	wp[usize] = cy;
+	wsize = usize + cy;
+	if( usign )
+	    wsign = 1;
+    }
+
+    w->nlimbs = wsize;
+    w->sign = wsign;
+}
+
+
+/****************
+ * Subtract the unsigned integer V from the mpi-integer U and store the
+ * result in W.
+ */
+void
+mpi_sub_ui(MPI w, MPI u, unsigned long v )
+{
+    mpi_ptr_t wp, up;
+    mpi_size_t usize, wsize;
+    int usign, wsign;
+
+    usize = u->nlimbs;
+    usign = u->sign;
+    wsign = 0;
+
+    /* If not space for W (and possible carry), increase space.  */
+    wsize = usize + 1;
+    if( w->alloced < wsize )
+	mpi_resize(w, wsize);
+
+    /* These must be after realloc (U may be the same as W).  */
+    up = u->d;
+    wp = w->d;
+
+    if( !usize ) {  /* simple */
+	wp[0] = v;
+	wsize = v? 1:0;
+	wsign = 1;
+    }
+    else if( usign ) {	/* mpi and v are negative */
+	mpi_limb_t cy;
+	cy = mpihelp_add_1(wp, up, usize, v);
+	wp[usize] = cy;
+	wsize = usize + cy;
+    }
+    else {  /* The signs are different.  Need exact comparison to determine
+	     * which operand to subtract from which.  */
+	if( usize == 1 && up[0] < v ) {
+	    wp[0] = v - up[0];
+	    wsize = 1;
+	    wsign = 1;
+	}
+	else {
+	    mpihelp_sub_1(wp, up, usize, v);
+	    /* Size can decrease with at most one limb. */
+	    wsize = usize - (wp[usize-1]==0);
+	}
+    }
+
+    w->nlimbs = wsize;
+    w->sign   = wsign;
+}
+
+void
+mpi_sub(MPI w, MPI u, MPI v)
+{
+    if( w == v ) {
+	MPI vv = mpi_copy(v);
+	vv->sign = !vv->sign;
+	mpi_add( w, u, vv );
+	mpi_free(vv);
+    }
+    else {
+	/* fixme: this is not thread-safe (we temporarily modify v) */
+	v->sign = !v->sign;
+	mpi_add( w, u, v );
+	v->sign = !v->sign;
+    }
+}
+
+
+void
+mpi_addm( MPI w, MPI u, MPI v, MPI m)
+{
+    mpi_add(w, u, v);
+    mpi_fdiv_r( w, w, m );
+}
+
+void
+mpi_subm( MPI w, MPI u, MPI v, MPI m)
+{
+    mpi_sub(w, u, v);
+    mpi_fdiv_r( w, w, m );
+}
+
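mpi_add() above is sign-magnitude arithmetic: the mpihelp_* helpers only
handle unsigned magnitudes, so the function chooses between adding and
subtracting magnitudes and derives the result sign from the operand signs
and a magnitude comparison. The same case analysis on plain machine words,
as an illustration only (carry out is ignored here; the limb version
propagates it):

	/* Sign-magnitude addition, mirroring the branches in mpi_add();
	 * a sign flag of 1 means negative. */
	static void sm_add(int us, unsigned ua, int vs, unsigned va,
			   int *ws, unsigned *wa)
	{
		if (us == vs) {		/* same sign: add magnitudes */
			*wa = ua + va;
			*ws = us;
		} else if (ua >= va) {	/* signs differ: subtract the
					 * smaller magnitude */
			*wa = ua - va;
			*ws = us;
		} else {
			*wa = va - ua;
			*ws = vs;
		}
		if (*wa == 0)		/* keep zero non-negative */
			*ws = 0;
	}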
diff -Nru a/security/rsa/gnupg_mpi_mpi-bit.c b/security/rsa/gnupg_mpi_mpi-bit.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-bit.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,253 @@
+/* mpi-bit.c  -  MPI bit level functions
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+#define MPI_INTERNAL_NEED_CLZ_TAB
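+/* Defined unconditionally just above, so the table below is always
+ * compiled in even if longlong.h picked an asm count_leading_zeros.
+ * __clz_tab[i] is the bit length of i: 0 for 0, 1 for 1, 2 for 2..3,
+ * and so on up to 8 for 128..255; the generic count_leading_zeros()
+ * applies it to the most significant non-zero byte of its argument. */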
+#ifdef MPI_INTERNAL_NEED_CLZ_TAB
+#ifdef __STDC__
+const
+#endif
+unsigned char
+__clz_tab[] =
+{
+  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+};
+#endif
+
+
+#define A_LIMB_1 ((mpi_limb_t)1)
+
+
+/****************
+ * Sometimes the most significant limbs (MSLs) are 0; for several
+ * reasons this is undesirable, so this function removes them.
+ */
+void
+mpi_normalize( MPI a )
+{
+    if( mpi_is_opaque (a) )
+	return;
+
+    for( ; a->nlimbs && !a->d[a->nlimbs-1]; a->nlimbs-- )
+	;
+}
+
+
+
+/****************
+ * Return the number of bits in A.
+ */
+unsigned
+mpi_get_nbits( MPI a )
+{
+    unsigned n;
+
+    mpi_normalize( a );
+
+    if( a->nlimbs ) {
+	mpi_limb_t alimb = a->d[a->nlimbs-1];
+	if( alimb ) {
+	  count_leading_zeros( n, alimb );
+	}
+	else
+	    n = BITS_PER_MPI_LIMB;
+	n = BITS_PER_MPI_LIMB - n + (a->nlimbs-1) * BITS_PER_MPI_LIMB;
+    }
+    else
+	n = 0;
+    return n;
+}
+
+
+/****************
+ * Test whether bit N is set.
+ */
+int
+mpi_test_bit( MPI a, unsigned n )
+{
+    unsigned limbno, bitno;
+    mpi_limb_t limb;
+
+    limbno = n / BITS_PER_MPI_LIMB;
+    bitno  = n % BITS_PER_MPI_LIMB;
+
+    if( limbno >= a->nlimbs )
+	return 0; /* too far left: this is a 0 */
+    limb = a->d[limbno];
+    return (limb & (A_LIMB_1 << bitno))? 1: 0;
+}
+
+
+/****************
+ * Set bit N of A.
+ */
+void
+mpi_set_bit( MPI a, unsigned n )
+{
+    unsigned limbno, bitno;
+
+    limbno = n / BITS_PER_MPI_LIMB;
+    bitno  = n % BITS_PER_MPI_LIMB;
+
+    if( limbno >= a->nlimbs ) { /* resize */
+	if( a->alloced < limbno+1 )
+	    mpi_resize(a, limbno+1 );
+	a->nlimbs = limbno+1;
+    }
+    a->d[limbno] |= (A_LIMB_1<<bitno);
+}
+
+/****************
+ * Set bit N of A and clear all bits above it.
+ */
+void
+mpi_set_highbit( MPI a, unsigned n )
+{
+    unsigned limbno, bitno;
+
+    limbno = n / BITS_PER_MPI_LIMB;
+    bitno  = n % BITS_PER_MPI_LIMB;
+
+    if( limbno >= a->nlimbs ) { /* resize */
+	if( a->alloced < limbno+1 )
+	    mpi_resize(a, limbno+1 );
+	a->nlimbs = limbno+1;
+    }
+    a->d[limbno] |= (A_LIMB_1<<bitno);
+    for( bitno++; bitno < BITS_PER_MPI_LIMB; bitno++ )
+	a->d[limbno] &= ~(A_LIMB_1 << bitno);
+    a->nlimbs = limbno+1;
+}
+
+/****************
+ * clear bit N of A and all bits above
+ */
+void
+mpi_clear_highbit( MPI a, unsigned n )
+{
+    unsigned limbno, bitno;
+
+    limbno = n / BITS_PER_MPI_LIMB;
+    bitno  = n % BITS_PER_MPI_LIMB;
+
+    if( limbno >= a->nlimbs )
+	return; /* not allocated, so no need to clear bits :-) */
+
+    for( ; bitno < BITS_PER_MPI_LIMB; bitno++ )
+	a->d[limbno] &= ~(A_LIMB_1 << bitno);
+    a->nlimbs = limbno+1;
+}
+
+/****************
+ * Clear bit N of A.
+ */
+void
+mpi_clear_bit( MPI a, unsigned n )
+{
+    unsigned limbno, bitno;
+
+    limbno = n / BITS_PER_MPI_LIMB;
+    bitno  = n % BITS_PER_MPI_LIMB;
+
+    if( limbno >= a->nlimbs )
+	return; /* don't need to clear this bit, it's too far to the left */
+    a->d[limbno] &= ~(A_LIMB_1 << bitno);
+}
+
+
+/****************
+ * Shift A by N bits to the right
+ * FIXME: should use alloc_limb if X and A are same.
+ */
+void
+mpi_rshift( MPI x, MPI a, unsigned n )
+{
+    mpi_ptr_t xp;
+    mpi_size_t xsize;
+
+    xsize = a->nlimbs;
+    x->sign = a->sign;
+    RESIZE_IF_NEEDED(x, xsize);
+    xp = x->d;
+
+    if( xsize ) {
+	mpihelp_rshift( xp, a->d, xsize, n);
+	MPN_NORMALIZE( xp, xsize);
+    }
+    x->nlimbs = xsize;
+}
+
+
+/****************
+ * Shift A by COUNT limbs to the left
+ * This is used only within the MPI library
+ */
+void
+mpi_lshift_limbs( MPI a, unsigned int count )
+{
+    mpi_ptr_t ap;
+    int n = a->nlimbs;
+    int i;
+
+    if( !count || !n )
+	return;
+
+    RESIZE_IF_NEEDED( a, n+count );
+    ap = a->d; /* read after the resize: it may have moved the limbs */
+
+    for( i = n-1; i >= 0; i-- )
+	ap[i+count] = ap[i];
+    for(i=0; i < count; i++ )
+	ap[i] = 0;
+    a->nlimbs += count;
+}
+
+
+/****************
+ * Shift A by COUNT limbs to the right
+ * This is used only within the MPI library
+ */
+void
+mpi_rshift_limbs( MPI a, unsigned int count )
+{
+    mpi_ptr_t ap = a->d;
+    mpi_size_t n = a->nlimbs;
+    unsigned int i;
+
+    if( count >= n ) {
+	a->nlimbs = 0;
+	return;
+    }
+
+    for( i = 0; i < n - count; i++ )
+	ap[i] = ap[i+count];
+    ap[i] = 0;
+    a->nlimbs -= count;
+}
+
+
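mpi_get_nbits() above reduces to one count_leading_zeros() on the top
limb. The same computation on a single 32-bit word, using an 8-bit lookup
table like __clz_tab (plain C illustration, not part of the patch):

	#include <stdint.h>

	/* Bit length of x (i.e. 32 - clz(x) for non-zero x); tab[i] must
	 * be the bit length of the byte value i, exactly like __clz_tab. */
	static unsigned bit_length32(uint32_t x, const unsigned char *tab)
	{
		unsigned shift = 24;

		if (x == 0)
			return 0;
		while (!(x >> shift))	/* find the top non-zero byte */
			shift -= 8;
		return shift + tab[x >> shift];
	}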
diff -Nru a/security/rsa/gnupg_mpi_mpi-cmp.c b/security/rsa/gnupg_mpi_mpi-cmp.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-cmp.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,71 @@
+/* mpi-cmp.c  -  MPI functions
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+
+int
+mpi_cmp_ui( MPI u, unsigned long v )
+{
+    mpi_limb_t limb = v;
+
+    mpi_normalize( u );
+    if( !u->nlimbs && !limb )
+	return 0;
+    if( u->sign )
+	return -1;
+    if( u->nlimbs > 1 )
+	return 1;
+
+    if( u->d[0] == limb )
+	return 0;
+    else if( u->d[0] > limb )
+	return 1;
+    else
+	return -1;
+}
+
+int
+mpi_cmp( MPI u, MPI v )
+{
+    mpi_size_t usize, vsize;
+    int cmp;
+
+    mpi_normalize( u );
+    mpi_normalize( v );
+    usize = u->nlimbs;
+    vsize = v->nlimbs;
+    if( !u->sign && v->sign )
+	return 1;
+    if( u->sign && !v->sign )
+	return -1;
+    if( usize != vsize && !u->sign && !v->sign )
+	return usize - vsize;
+    if( usize != vsize && u->sign && v->sign )
+	return vsize - usize;
+    if( !usize )
+	return 0;
+    if( !(cmp=mpihelp_cmp( u->d, v->d, usize )) )
+	return 0;
+    if( (cmp < 0?1:0) == (u->sign?1:0))
+	return 1;
+    return -1;
+}
+
+
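mpi_cmp() orders sign-magnitude values: sign first, then limb count, then
limb contents, with the magnitude comparison inverted for negative
operands. The same ordering on single words, assuming normalized inputs
with no negative zero (illustration only):

	/* Compare sign-magnitude values; returns <0, 0 or >0 like
	 * mpi_cmp().  A sign flag of 1 means negative. */
	static int sm_cmp(int as, unsigned am, int bs, unsigned bm)
	{
		if (as != bs)		/* negative sorts below non-negative */
			return as ? -1 : 1;
		if (am == bm)
			return 0;
		if (as)			/* both negative: bigger magnitude
					 * is the smaller value */
			return am > bm ? -1 : 1;
		return am > bm ? 1 : -1;
	}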
diff -Nru a/security/rsa/gnupg_mpi_mpi-div.c b/security/rsa/gnupg_mpi_mpi-div.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-div.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,318 @@
+/* mpi-div.c  -  MPI functions
+ *	Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ *	Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+
+void
+mpi_fdiv_r( MPI rem, MPI dividend, MPI divisor )
+{
+    int divisor_sign = divisor->sign;
+    MPI temp_divisor = NULL;
+
+    /* We need the original value of the divisor after the remainder has been
+     * preliminary calculated.	We have to copy it to temporary space if it's
+     * the same variable as REM.  */
+    if( rem == divisor ) {
+	temp_divisor = mpi_copy( divisor );
+	divisor = temp_divisor;
+    }
+
+    mpi_tdiv_r( rem, dividend, divisor );
+
+    if( ((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs )
+	mpi_add( rem, rem, divisor);
+
+    if( temp_divisor )
+	mpi_free(temp_divisor);
+}
+
+
+
+/****************
+ * Division rounding the quotient towards -infinity.
+ * The remainder gets the same sign as the denominator.
+ * rem is optional
+ */
+
+ulong
+mpi_fdiv_r_ui( MPI rem, MPI dividend, ulong divisor )
+{
+    mpi_limb_t rlimb;
+
+    rlimb = mpihelp_mod_1( dividend->d, dividend->nlimbs, divisor );
+    if( rlimb && dividend->sign )
+	rlimb = divisor - rlimb;
+
+    if( rem ) {
+	rem->d[0] = rlimb;
+	rem->nlimbs = rlimb? 1:0;
+    }
+    return rlimb;
+}
+
+
+void
+mpi_fdiv_q( MPI quot, MPI dividend, MPI divisor )
+{
+    MPI tmp = mpi_alloc( mpi_get_nlimbs(quot) );
+    mpi_fdiv_qr( quot, tmp, dividend, divisor);
+    mpi_free(tmp);
+}
+
+void
+mpi_fdiv_qr( MPI quot, MPI rem, MPI dividend, MPI divisor )
+{
+    int divisor_sign = divisor->sign;
+    MPI temp_divisor = NULL;
+
+    if( quot == divisor || rem == divisor ) {
+	temp_divisor = mpi_copy( divisor );
+	divisor = temp_divisor;
+    }
+
+    mpi_tdiv_qr( quot, rem, dividend, divisor );
+
+    if( (divisor_sign ^ dividend->sign) && rem->nlimbs ) {
+	mpi_sub_ui( quot, quot, 1 );
+	mpi_add( rem, rem, divisor);
+    }
+
+    if( temp_divisor )
+	mpi_free(temp_divisor);
+}
+
+
+/* If den == quot, den needs temporary storage.
+ * If den == rem, den needs temporary storage.
+ * If num == quot, num needs temporary storage.
+ * If den has temporary storage, it can be normalized while being copied,
+ *   i.e no extra storage should be allocated.
+ */
+
+void
+mpi_tdiv_r( MPI rem, MPI num, MPI den)
+{
+    mpi_tdiv_qr(NULL, rem, num, den );
+}
+
+void
+mpi_tdiv_qr( MPI quot, MPI rem, MPI num, MPI den)
+{
+    mpi_ptr_t np, dp;
+    mpi_ptr_t qp, rp;
+    mpi_size_t nsize = num->nlimbs;
+    mpi_size_t dsize = den->nlimbs;
+    mpi_size_t qsize, rsize;
+    mpi_size_t sign_remainder = num->sign;
+    mpi_size_t sign_quotient = num->sign ^ den->sign;
+    unsigned normalization_steps;
+    mpi_limb_t q_limb;
+    mpi_ptr_t marker[5];
+    int markidx=0;
+
+    /* Ensure space is enough for quotient and remainder.
+     * We need space for an extra limb in the remainder, because it's
+     * up-shifted (normalized) below.  */
+    rsize = nsize + 1;
+    mpi_resize( rem, rsize);
+
+    qsize = rsize - dsize;	  /* qsize cannot be bigger than this.	*/
+    if( qsize <= 0 ) {
+	if( num != rem ) {
+	    rem->nlimbs = num->nlimbs;
+	    rem->sign = num->sign;
+	    MPN_COPY(rem->d, num->d, nsize);
+	}
+	if( quot ) {
+	    /* This needs to follow the assignment to rem, in case the
+	     * numerator and quotient are the same.  */
+	    quot->nlimbs = 0;
+	    quot->sign = 0;
+	}
+	return;
+    }
+
+    if( quot )
+	mpi_resize( quot, qsize);
+
+    /* Read pointers here, when reallocation is finished.  */
+    np = num->d;
+    dp = den->d;
+    rp = rem->d;
+
+    /* Optimize division by a single-limb divisor.  */
+    if( dsize == 1 ) {
+	mpi_limb_t rlimb;
+	if( quot ) {
+	    qp = quot->d;
+	    rlimb = mpihelp_divmod_1( qp, np, nsize, dp[0] );
+	    qsize -= qp[qsize - 1] == 0;
+	    quot->nlimbs = qsize;
+	    quot->sign = sign_quotient;
+	}
+	else
+	    rlimb = mpihelp_mod_1( np, nsize, dp[0] );
+	rp[0] = rlimb;
+	rsize = rlimb != 0?1:0;
+	rem->nlimbs = rsize;
+	rem->sign = sign_remainder;
+	return;
+    }
+
+
+    if( quot ) {
+	qp = quot->d;
+	/* Make sure QP and NP point to different objects.  Otherwise the
+	 * numerator would be gradually overwritten by the quotient limbs.  */
+	if(qp == np) { /* Copy NP object to temporary space.  */
+	    np = marker[markidx++] = mpi_alloc_limb_space(nsize,
+							  mpi_is_secure(quot));
+	    MPN_COPY(np, qp, nsize);
+	}
+    }
+    else /* Put quotient at top of remainder. */
+	qp = rp + dsize;
+
+    count_leading_zeros( normalization_steps, dp[dsize - 1] );
+
+    /* Normalize the denominator, i.e. make its most significant bit set by
+     * shifting it NORMALIZATION_STEPS bits to the left.  Also shift the
+     * numerator the same number of steps (to keep the quotient the same!).
+     */
+    if( normalization_steps ) {
+	mpi_ptr_t tp;
+	mpi_limb_t nlimb;
+
+	/* Shift up the denominator setting the most significant bit of
+	 * the most significant word.  Use temporary storage not to clobber
+	 * the original contents of the denominator.  */
+	tp = marker[markidx++] = mpi_alloc_limb_space(dsize,mpi_is_secure(den));
+	mpihelp_lshift( tp, dp, dsize, normalization_steps );
+	dp = tp;
+
+	/* Shift up the numerator, possibly introducing a new most
+	 * significant word.  Move the shifted numerator in the remainder
+	 * meanwhile.  */
+	nlimb = mpihelp_lshift(rp, np, nsize, normalization_steps);
+	if( nlimb ) {
+	    rp[nsize] = nlimb;
+	    rsize = nsize + 1;
+	}
+	else
+	    rsize = nsize;
+    }
+    else {
+	/* The denominator is already normalized, as required.	Copy it to
+	 * temporary space if it overlaps with the quotient or remainder.  */
+	if( dp == rp || (quot && (dp == qp))) {
+	    mpi_ptr_t tp;
+
+	    tp = marker[markidx++] = mpi_alloc_limb_space(dsize, mpi_is_secure(den));
+	    MPN_COPY( tp, dp, dsize );
+	    dp = tp;
+	}
+
+	/* Move the numerator to the remainder.  */
+	if( rp != np )
+	    MPN_COPY(rp, np, nsize);
+
+	rsize = nsize;
+    }
+
+    q_limb = mpihelp_divrem( qp, 0, rp, rsize, dp, dsize );
+
+    if( quot ) {
+	qsize = rsize - dsize;
+	if(q_limb) {
+	    qp[qsize] = q_limb;
+	    qsize += 1;
+	}
+
+	quot->nlimbs = qsize;
+	quot->sign = sign_quotient;
+    }
+
+    rsize = dsize;
+    MPN_NORMALIZE (rp, rsize);
+
+    if( normalization_steps && rsize ) {
+	mpihelp_rshift(rp, rp, rsize, normalization_steps);
+	rsize -= rp[rsize - 1] == 0?1:0;
+    }
+
+    rem->nlimbs = rsize;
+    rem->sign	= sign_remainder;
+    while( markidx )
+	mpi_free_limb_space(marker[--markidx]);
+}
+
+void
+mpi_tdiv_q_2exp( MPI w, MPI u, unsigned count )
+{
+    mpi_size_t usize, wsize;
+    mpi_size_t limb_cnt;
+
+    usize = u->nlimbs;
+    limb_cnt = count / BITS_PER_MPI_LIMB;
+    wsize = usize - limb_cnt;
+    if( limb_cnt >= usize )
+	w->nlimbs = 0;
+    else {
+	mpi_ptr_t wp;
+	mpi_ptr_t up;
+
+	RESIZE_IF_NEEDED( w, wsize );
+	wp = w->d;
+	up = u->d;
+
+	count %= BITS_PER_MPI_LIMB;
+	if( count ) {
+	    mpihelp_rshift( wp, up + limb_cnt, wsize, count );
+	    wsize -= !wp[wsize - 1];
+	}
+	else {
+	    MPN_COPY_INCR( wp, up + limb_cnt, wsize);
+	}
+
+	w->nlimbs = wsize;
+    }
+}
+
+/****************
+ * Check whether dividend is divisible by divisor
+ * (note: divisor must fit into a limb)
+ */
+int
+mpi_divisible_ui(MPI dividend, ulong divisor )
+{
+    return !mpihelp_mod_1( dividend->d, dividend->nlimbs, divisor );
+}
+
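mpi_fdiv_qr() builds floor division out of the truncating mpi_tdiv_qr():
when the operand signs differ and the remainder is non-zero, the quotient
is decremented and the divisor added to the remainder, which leaves the
remainder with the divisor's sign. The identical fix-up on native
integers (illustration only):

	/* Floor division from C's truncating '/' and '%', mirroring the
	 * adjustment in mpi_fdiv_qr(). */
	static void fdiv_qr(long n, long d, long *q, long *r)
	{
		*q = n / d;		/* truncates toward zero (C99) */
		*r = n % d;		/* has the sign of n */
		if (*r != 0 && ((n < 0) != (d < 0))) {
			*q -= 1;
			*r += d;	/* remainder now has d's sign */
		}
	}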
diff -Nru a/security/rsa/gnupg_mpi_mpi-gcd.c b/security/rsa/gnupg_mpi_mpi-gcd.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-gcd.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,51 @@
+/* mpi-gcd.c  -  MPI functions
+ * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+
+/****************
+ * Find the greatest common divisor G of A and B.
+ * Return: true if the GCD is 1, false in all other cases
+ */
+int
+mpi_gcd( MPI g, MPI xa, MPI xb )
+{
+    MPI a, b;
+
+    a = mpi_copy(xa);
+    b = mpi_copy(xb);
+
+    /* TAOCP Vol II, 4.5.2, Algorithm A */
+    a->sign = 0;
+    b->sign = 0;
+    while( mpi_cmp_ui( b, 0 ) ) {
+	mpi_fdiv_r( g, a, b ); /* g used as temporary variable */
+	mpi_set(a,b);
+	mpi_set(b,g);
+    }
+    mpi_set(g, a);
+
+    mpi_free(a);
+    mpi_free(b);
+    return !mpi_cmp_ui( g, 1);
+}
+
+
+
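The loop in mpi_gcd() is the classic Euclidean algorithm, with g doubling
as the scratch variable for the remainder. On native unsigned integers it
is simply (illustration only):

	/* Euclid's algorithm, the same loop mpi_gcd() runs on MPIs. */
	static unsigned long gcd_ul(unsigned long a, unsigned long b)
	{
		while (b != 0) {
			unsigned long t = a % b; /* the role g plays above */
			a = b;
			b = t;
		}
		return a;
	}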
diff -Nru a/security/rsa/gnupg_mpi_mpi-inline.c b/security/rsa/gnupg_mpi_mpi-inline.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-inline.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,33 @@
+/* mpi-inline.c
+ * Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+
+/* put the inline functions as real functions into the lib */
+#define G10_MPI_INLINE_DECL
+
+#include "gnupg_mpi_mpi-internal.h"
+
+/* Always include the header, because it is only
+ * included by mpi-internal.h if __GCC__ is defined, but we
+ * need it here in all cases, and the above definition
+ * of the macro allows us to do so.
+ */
+#include "gnupg_mpi_mpi-inline.h"
+
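What makes this work is the GNU89 extern inline model: declared
`extern __inline__`, the function bodies in mpi-inline.h can be inlined
at call sites but are never emitted as symbols; defining
G10_MPI_INLINE_DECL to nothing before the include turns the very same
bodies into the single set of real, linkable definitions. A minimal
sketch of the pattern, with hypothetical names:

	/* util.h */
	#ifndef UTIL_DECL
	#define UTIL_DECL extern __inline__	/* gnu89: inline, no symbol */
	#endif

	UTIL_DECL int twice(int x)
	{
		return 2 * x;
	}

	/* util.c: emit the one real definition for non-inlined callers */
	#define UTIL_DECL	/* empty */
	#include "util.h"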
diff -Nru a/security/rsa/gnupg_mpi_mpi-inline.h b/security/rsa/gnupg_mpi_mpi-inline.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-inline.h	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,128 @@
+/* mpi-inline.h  -  Internal to the Multi Precision Integers
+ *	Copyright (C) 1994, 1996, 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#ifndef G10_MPI_INLINE_H
+#define G10_MPI_INLINE_H
+
+#ifndef G10_MPI_INLINE_DECL
+  #define G10_MPI_INLINE_DECL  extern __inline__
+#endif
+
+G10_MPI_INLINE_DECL  mpi_limb_t
+mpihelp_add_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+	       mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+    mpi_limb_t x;
+
+    x = *s1_ptr++;
+    s2_limb += x;
+    *res_ptr++ = s2_limb;
+    if( s2_limb < x ) { /* sum is less than the left operand: handle carry */
+	while( --s1_size ) {
+	    x = *s1_ptr++ + 1;	/* add carry */
+	    *res_ptr++ = x;	/* and store */
+	    if( x )		/* not 0 (no overflow): we can stop */
+		goto leave;
+	}
+	return 1; /* return carry (size of s1 too small) */
+    }
+
+  leave:
+    if( res_ptr != s1_ptr ) { /* not the same variable */
+	mpi_size_t i;	       /* copy the rest */
+	for( i=0; i < s1_size-1; i++ )
+	    res_ptr[i] = s1_ptr[i];
+    }
+    return 0; /* no carry */
+}
+
+
+
+G10_MPI_INLINE_DECL mpi_limb_t
+mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+			       mpi_ptr_t s2_ptr, mpi_size_t s2_size)
+{
+    mpi_limb_t cy = 0;
+
+    if( s2_size )
+	cy = mpihelp_add_n( res_ptr, s1_ptr, s2_ptr, s2_size );
+
+    if( s1_size - s2_size )
+	cy = mpihelp_add_1( res_ptr + s2_size, s1_ptr + s2_size,
+			    s1_size - s2_size, cy);
+    return cy;
+}
+
+
+G10_MPI_INLINE_DECL mpi_limb_t
+mpihelp_sub_1(mpi_ptr_t res_ptr,  mpi_ptr_t s1_ptr,
+	      mpi_size_t s1_size, mpi_limb_t s2_limb )
+{
+    mpi_limb_t x;
+
+    x = *s1_ptr++;
+    s2_limb = x - s2_limb;
+    *res_ptr++ = s2_limb;
+    if( s2_limb > x ) {
+	while( --s1_size ) {
+	    x = *s1_ptr++;
+	    *res_ptr++ = x - 1;
+	    if( x )
+		goto leave;
+	}
+	return 1;
+    }
+
+  leave:
+    if( res_ptr != s1_ptr ) {
+	mpi_size_t i;
+	for( i=0; i < s1_size-1; i++ )
+	    res_ptr[i] = s1_ptr[i];
+    }
+    return 0;
+}
+
+
+
+G10_MPI_INLINE_DECL   mpi_limb_t
+mpihelp_sub( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+				mpi_ptr_t s2_ptr, mpi_size_t s2_size)
+{
+    mpi_limb_t cy = 0;
+
+    if( s2_size )
+	cy = mpihelp_sub_n(res_ptr, s1_ptr, s2_ptr, s2_size);
+
+    if( s1_size - s2_size )
+	cy = mpihelp_sub_1(res_ptr + s2_size, s1_ptr + s2_size,
+				      s1_size - s2_size, cy);
+    return cy;
+}
+
+
+#endif /*G10_MPI_INLINE_H*/
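mpihelp_add_1() above illustrates the limb-level carry discipline used
throughout the helpers: an unsigned sum wrapped if and only if it is
smaller than one of the operands, and a carry ripples upward until some
limb does not wrap. The same logic over a little-endian word array
(illustration only):

	#include <stdint.h>

	/* Add a single word to an n-word number in place; returns the
	 * final carry, like mpihelp_add_1(). */
	static uint32_t add_1(uint32_t *r, int n, uint32_t v)
	{
		int i;

		for (i = 0; i < n; i++) {
			r[i] += v;
			if (r[i] >= v)	/* no wrap: carry absorbed */
				return 0;
			v = 1;		/* wrapped: carry into next limb */
		}
		return 1;		/* carry out of the top limb */
	}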
diff -Nru a/security/rsa/gnupg_mpi_mpi-internal.h b/security/rsa/gnupg_mpi_mpi-internal.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-internal.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,259 @@
+/* mpi-internal.h  -  Internal to the Multi Precision Integers
+ *	Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ *	Copyright (C) 1998, 2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#ifndef G10_MPI_INTERNAL_H
+#define G10_MPI_INTERNAL_H
+
+#include "gnupg_mpi_mpi.h"
+
+/* If KARATSUBA_THRESHOLD is not already defined, define it to a
+ * value which is good on most machines.  */
+
+/* tested 4, 16, 32 and 64, where 16 gave the best performance when
+ * checking a 768 and a 1024 bit ElGamal signature.
+ * (wk 22.12.97) */
+#ifndef KARATSUBA_THRESHOLD
+    #define KARATSUBA_THRESHOLD 16
+#endif
+
+/* The code can't handle KARATSUBA_THRESHOLD smaller than 2.  */
+#if KARATSUBA_THRESHOLD < 2
+    #undef KARATSUBA_THRESHOLD
+    #define KARATSUBA_THRESHOLD 2
+#endif
+
+
+typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
+typedef int mpi_size_t;        /* (must be a signed type) */
+
+#define ABS(x) ((x) >= 0 ? (x) : -(x))
+#define MIN(l,o) ((l) < (o) ? (l) : (o))
+#define MAX(h,i) ((h) > (i) ? (h) : (i))
+#define RESIZE_IF_NEEDED(a,b) \
+    do {			   \
+	if( (a)->alloced < (b) )   \
+	    mpi_resize((a), (b));  \
+    } while(0)
+
+/* Copy N limbs from S to D.  */
+#define MPN_COPY( d, s, n) \
+    do {				\
+	mpi_size_t _i;			\
+	for( _i = 0; _i < (n); _i++ )	\
+	    (d)[_i] = (s)[_i];		\
+    } while(0)
+
+#define MPN_COPY_INCR( d, s, n) 	\
+    do {				\
+	mpi_size_t _i;			\
+	for( _i = 0; _i < (n); _i++ )	\
+	    (d)[_i] = (s)[_i];		\
+    } while (0)
+
+#define MPN_COPY_DECR( d, s, n ) \
+    do {				\
+	mpi_size_t _i;			\
+	for( _i = (n)-1; _i >= 0; _i--) \
+	   (d)[_i] = (s)[_i];		\
+    } while(0)
+
+/* Zero N limbs at D */
+#define MPN_ZERO(d, n) \
+    do {				  \
+	int  _i;			  \
+	for( _i = 0; _i < (n); _i++ )  \
+	    (d)[_i] = 0;		    \
+    } while (0)
+
+#define MPN_NORMALIZE(d, n)  \
+    do {		       \
+	while( (n) > 0 ) {     \
+	    if( (d)[(n)-1] ) \
+		break;	       \
+	    (n)--;	       \
+	}		       \
+    } while(0)
+
+#define MPN_NORMALIZE_NOT_ZERO(d, n) \
+    do {				    \
+	for(;;) {			    \
+	    if( (d)[(n)-1] )		    \
+		break;			    \
+	    (n)--;			    \
+	}				    \
+    } while(0)
+
+#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
+    do {						\
+	if( (size) < KARATSUBA_THRESHOLD )		\
+	    mul_n_basecase (prodp, up, vp, size);	\
+	else						\
+	    mul_n (prodp, up, vp, size, tspace);	\
+    } while (0)
+
+
+/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
+ * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
+ * If this would yield overflow, DI should be the largest possible number
+ * (i.e., only ones).  For correct operation, the most significant bit of D
+ * has to be set.  Put the quotient in Q and the remainder in R.
+ */
+#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
+    do {							    \
+	mpi_limb_t _q, _ql, _r; 				    \
+	mpi_limb_t _xh, _xl;					    \
+	umul_ppmm (_q, _ql, (nh), (di));			    \
+	_q += (nh);	/* DI is 2**BITS_PER_MPI_LIMB too small */  \
+	umul_ppmm (_xh, _xl, _q, (d));				    \
+	sub_ddmmss (_xh, _r, (nh), (nl), _xh, _xl);		    \
+	if( _xh ) {						    \
+	    sub_ddmmss (_xh, _r, _xh, _r, 0, (d));		    \
+	    _q++;						    \
+	    if( _xh) {						    \
+		sub_ddmmss (_xh, _r, _xh, _r, 0, (d));		    \
+		_q++;						    \
+	    }							    \
+	}							    \
+	if( _r >= (d) ) {					    \
+	    _r -= (d);						    \
+	    _q++;						    \
+	}							    \
+	(r) = _r;						    \
+	(q) = _q;						    \
+    } while (0)
+
+
+/*-- mpiutil.c --*/
+#ifdef M_DEBUG
+  #define mpi_alloc_limb_space(n,f)  mpi_debug_alloc_limb_space((n),(f), M_DBGINFO( __LINE__ ) )
+  #define mpi_free_limb_space(n)  mpi_debug_free_limb_space((n),  M_DBGINFO( __LINE__ ) )
+  mpi_ptr_t mpi_debug_alloc_limb_space( unsigned nlimbs, int sec, const char *info  );
+  void mpi_debug_free_limb_space( mpi_ptr_t a, const char *info );
+#else
+  mpi_ptr_t mpi_alloc_limb_space( unsigned nlimbs, int sec );
+  void mpi_free_limb_space( mpi_ptr_t a );
+#endif
+void mpi_assign_limb_space( MPI a, mpi_ptr_t ap, unsigned nlimbs );
+
+/*-- mpi-bit.c --*/
+void mpi_rshift_limbs( MPI a, unsigned int count );
+void mpi_lshift_limbs( MPI a, unsigned int count );
+
+
+/*-- mpihelp-add.c --*/
+mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr,  mpi_ptr_t s1_ptr,
+			 mpi_size_t s1_size, mpi_limb_t s2_limb );
+mpi_limb_t mpihelp_add_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+			  mpi_ptr_t s2_ptr,  mpi_size_t size);
+mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+		       mpi_ptr_t s2_ptr, mpi_size_t s2_size);
+
+/*-- mpihelp-sub.c --*/
+mpi_limb_t mpihelp_sub_1( mpi_ptr_t res_ptr,  mpi_ptr_t s1_ptr,
+			  mpi_size_t s1_size, mpi_limb_t s2_limb );
+mpi_limb_t mpihelp_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+			  mpi_ptr_t s2_ptr, mpi_size_t size);
+mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+		       mpi_ptr_t s2_ptr, mpi_size_t s2_size);
+
+/*-- mpihelp-cmp.c --*/
+int mpihelp_cmp( mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size );
+
+/*-- mpihelp-mul.c --*/
+
+struct karatsuba_ctx {
+    struct karatsuba_ctx *next;
+    mpi_ptr_t tspace;
+    mpi_size_t tspace_size;
+    mpi_ptr_t tp;
+    mpi_size_t tp_size;
+};
+
+void mpihelp_release_karatsuba_ctx( struct karatsuba_ctx *ctx );
+
+mpi_limb_t mpihelp_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+			     mpi_size_t s1_size, mpi_limb_t s2_limb);
+mpi_limb_t mpihelp_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+			     mpi_size_t s1_size, mpi_limb_t s2_limb);
+void mpihelp_mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
+						   mpi_size_t size);
+mpi_limb_t mpihelp_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
+					 mpi_ptr_t vp, mpi_size_t vsize);
+void mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size );
+void mpih_sqr_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
+						mpi_ptr_t tspace);
+
+void mpihelp_mul_karatsuba_case( mpi_ptr_t prodp,
+				 mpi_ptr_t up, mpi_size_t usize,
+				 mpi_ptr_t vp, mpi_size_t vsize,
+				 struct karatsuba_ctx *ctx );
+
+
+/*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/
+mpi_limb_t mpihelp_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+			  mpi_size_t s1_size, mpi_limb_t s2_limb);
+
+/*-- mpihelp-div.c --*/
+mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+						 mpi_limb_t divisor_limb);
+mpi_limb_t mpihelp_divrem( mpi_ptr_t qp, mpi_size_t qextra_limbs,
+			   mpi_ptr_t np, mpi_size_t nsize,
+			   mpi_ptr_t dp, mpi_size_t dsize);
+mpi_limb_t mpihelp_divmod_1( mpi_ptr_t quot_ptr,
+			     mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+			     mpi_limb_t divisor_limb);
+
+/*-- mpihelp-shift.c --*/
+mpi_limb_t mpihelp_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
+							   unsigned cnt);
+mpi_limb_t mpihelp_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
+							   unsigned cnt);
+
+
+/* Define stuff for longlong.h.  */
+#define W_TYPE_SIZE BITS_PER_MPI_LIMB
+  typedef mpi_limb_t   UWtype;
+  typedef unsigned int UHWtype;
+#if defined (__GNUC__)
+  typedef unsigned int UQItype	  __attribute__ ((mode (QI)));
+  typedef	   int SItype	  __attribute__ ((mode (SI)));
+  typedef unsigned int USItype	  __attribute__ ((mode (SI)));
+  typedef	   int DItype	  __attribute__ ((mode (DI)));
+  typedef unsigned int UDItype	  __attribute__ ((mode (DI)));
+#else
+  typedef unsigned char UQItype;
+  typedef	   long SItype;
+  typedef unsigned long USItype;
+#endif
+
+#ifdef __GNUC__
+  #include "gnupg_mpi_mpi-inline.h"
+#endif
+
+#endif /*G10_MPI_INTERNAL_H*/
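
UDIV_QRNND_PREINV above trades the division inside udiv_qrnnd for two
multiplications by a precomputed reciprocal of the normalized divisor.  The
primitive it stands in for divides a two-limb numerator by a one-limb
divisor; with 32-bit limbs it can be written directly in 64-bit host
arithmetic.  A sketch under the usual precondition nh < d, so the quotient
fits in a single limb (an illustration, not the kernel macro):

#include <stdio.h>
#include <stdint.h>

/* Divide the two-limb value (nh,nl) by d; requires nh < d so that
 * the quotient fits in one 32-bit limb. */
static void udiv_qrnnd32(uint32_t *q, uint32_t *r,
			 uint32_t nh, uint32_t nl, uint32_t d)
{
	uint64_t n = ((uint64_t)nh << 32) | nl;

	*q = (uint32_t)(n / d);
	*r = (uint32_t)(n % d);
}

int main(void)
{
	uint32_t q, r;

	/* 2^32 / 3 = 0x55555555 remainder 1 */
	udiv_qrnnd32(&q, &r, 1, 0, 3);
	printf("q=%08x r=%u\n", q, r);
	return 0;
}
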
diff -Nru a/security/rsa/gnupg_mpi_mpi-inv.c b/security/rsa/gnupg_mpi_mpi-inv.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-inv.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,267 @@
+/* mpi-inv.c  -  MPI functions
+ * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+
+
+/****************
+ * Calculate the multiplicative inverse X of A mod N
+ * That is: Find the solution x for
+ *		1 = (a*x) mod n
+ */
+void
+mpi_invm( MPI x, MPI a, MPI n )
+{
+  #if 0
+    MPI u, v, u1, u2, u3, v1, v2, v3, q, t1, t2, t3;
+    MPI ta, tb, tc;
+
+    u = mpi_copy(a);
+    v = mpi_copy(n);
+    u1 = mpi_alloc_set_ui(1);
+    u2 = mpi_alloc_set_ui(0);
+    u3 = mpi_copy(u);
+    v1 = mpi_alloc_set_ui(0);
+    v2 = mpi_alloc_set_ui(1);
+    v3 = mpi_copy(v);
+    q  = mpi_alloc( mpi_get_nlimbs(u)+1 );
+    t1 = mpi_alloc( mpi_get_nlimbs(u)+1 );
+    t2 = mpi_alloc( mpi_get_nlimbs(u)+1 );
+    t3 = mpi_alloc( mpi_get_nlimbs(u)+1 );
+    while( mpi_cmp_ui( v3, 0 ) ) {
+	mpi_fdiv_q( q, u3, v3 );
+	mpi_mul(t1, v1, q); mpi_mul(t2, v2, q); mpi_mul(t3, v3, q);
+	mpi_sub(t1, u1, t1); mpi_sub(t2, u2, t2); mpi_sub(t3, u3, t3);
+	mpi_set(u1, v1); mpi_set(u2, v2); mpi_set(u3, v3);
+	mpi_set(v1, t1); mpi_set(v2, t2); mpi_set(v3, t3);
+    }
+    /*	log_debug("result:\n");
+	log_mpidump("q =", q );
+	log_mpidump("u1=", u1);
+	log_mpidump("u2=", u2);
+	log_mpidump("u3=", u3);
+	log_mpidump("v1=", v1);
+	log_mpidump("v2=", v2); */
+    mpi_set(x, u1);
+
+    mpi_free(u1);
+    mpi_free(u2);
+    mpi_free(u3);
+    mpi_free(v1);
+    mpi_free(v2);
+    mpi_free(v3);
+    mpi_free(q);
+    mpi_free(t1);
+    mpi_free(t2);
+    mpi_free(t3);
+    mpi_free(u);
+    mpi_free(v);
+  #elif 0
+    /* Extended Euclid's algorithm (see TAOCP Vol II, 4.5.2, Alg X)
+     * modified according to Michael Penk's solution for Exercise 35 */
+
+    /* FIXME: we can simplify this in most cases (see Knuth) */
+    MPI u, v, u1, u2, u3, v1, v2, v3, t1, t2, t3;
+    unsigned k;
+    int sign;
+
+    u = mpi_copy(a);
+    v = mpi_copy(n);
+    for(k=0; !mpi_test_bit(u,0) && !mpi_test_bit(v,0); k++ ) {
+	mpi_rshift(u, u, 1);
+	mpi_rshift(v, v, 1);
+    }
+
+
+    u1 = mpi_alloc_set_ui(1);
+    u2 = mpi_alloc_set_ui(0);
+    u3 = mpi_copy(u);
+    v1 = mpi_copy(v);				   /* !-- used as const 1 */
+    v2 = mpi_alloc( mpi_get_nlimbs(u) ); mpi_sub( v2, u1, u );
+    v3 = mpi_copy(v);
+    if( mpi_test_bit(u, 0) ) { /* u is odd */
+	t1 = mpi_alloc_set_ui(0);
+	t2 = mpi_alloc_set_ui(1); t2->sign = 1;
+	t3 = mpi_copy(v); t3->sign = !t3->sign;
+	goto Y4;
+    }
+    else {
+	t1 = mpi_alloc_set_ui(1);
+	t2 = mpi_alloc_set_ui(0);
+	t3 = mpi_copy(u);
+    }
+    do {
+	do {
+	    if( mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0) ) { /* one is odd */
+		mpi_add(t1, t1, v);
+		mpi_sub(t2, t2, u);
+	    }
+	    mpi_rshift(t1, t1, 1);
+	    mpi_rshift(t2, t2, 1);
+	    mpi_rshift(t3, t3, 1);
+	  Y4:
+	    ;
+	} while( !mpi_test_bit( t3, 0 ) ); /* while t3 is even */
+
+	if( !t3->sign ) {
+	    mpi_set(u1, t1);
+	    mpi_set(u2, t2);
+	    mpi_set(u3, t3);
+	}
+	else {
+	    mpi_sub(v1, v, t1);
+	    sign = u->sign; u->sign = !u->sign;
+	    mpi_sub(v2, u, t2);
+	    u->sign = sign;
+	    sign = t3->sign; t3->sign = !t3->sign;
+	    mpi_set(v3, t3);
+	    t3->sign = sign;
+	}
+	mpi_sub(t1, u1, v1);
+	mpi_sub(t2, u2, v2);
+	mpi_sub(t3, u3, v3);
+	if( t1->sign ) {
+	    mpi_add(t1, t1, v);
+	    mpi_sub(t2, t2, u);
+	}
+    } while( mpi_cmp_ui( t3, 0 ) ); /* while t3 != 0 */
+    /* mpi_lshift( u3, k ); */
+    mpi_set(x, u1);
+
+    mpi_free(u1);
+    mpi_free(u2);
+    mpi_free(u3);
+    mpi_free(v1);
+    mpi_free(v2);
+    mpi_free(v3);
+    mpi_free(t1);
+    mpi_free(t2);
+    mpi_free(t3);
+  #else
+    /* Extended Euclid's algorithm (see TAOCP Vol II, 4.5.2, Alg X)
+     * modified according to Michael Penk's solution for Exercise 35
+     * with further enhancement */
+    MPI u, v, u1, u2=NULL, u3, v1, v2=NULL, v3, t1, t2=NULL, t3;
+    unsigned k;
+    int sign;
+    int odd ;
+
+    u = mpi_copy(a);
+    v = mpi_copy(n);
+
+    for(k=0; !mpi_test_bit(u,0) && !mpi_test_bit(v,0); k++ ) {
+	mpi_rshift(u, u, 1);
+	mpi_rshift(v, v, 1);
+    }
+    odd = mpi_test_bit(v,0);
+
+    u1 = mpi_alloc_set_ui(1);
+    if( !odd )
+	u2 = mpi_alloc_set_ui(0);
+    u3 = mpi_copy(u);
+    v1 = mpi_copy(v);
+    if( !odd ) {
+	v2 = mpi_alloc( mpi_get_nlimbs(u) );
+	mpi_sub( v2, u1, u ); /* U is used as const 1 */
+    }
+    v3 = mpi_copy(v);
+    if( mpi_test_bit(u, 0) ) { /* u is odd */
+	t1 = mpi_alloc_set_ui(0);
+	if( !odd ) {
+	    t2 = mpi_alloc_set_ui(1); t2->sign = 1;
+	}
+	t3 = mpi_copy(v); t3->sign = !t3->sign;
+	goto Y4;
+    }
+    else {
+	t1 = mpi_alloc_set_ui(1);
+	if( !odd )
+	    t2 = mpi_alloc_set_ui(0);
+	t3 = mpi_copy(u);
+    }
+    do {
+	do {
+	    if( !odd ) {
+		if( mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0) ) { /* one is odd */
+		    mpi_add(t1, t1, v);
+		    mpi_sub(t2, t2, u);
+		}
+		mpi_rshift(t1, t1, 1);
+		mpi_rshift(t2, t2, 1);
+		mpi_rshift(t3, t3, 1);
+	    }
+	    else {
+		if( mpi_test_bit(t1, 0) )
+		    mpi_add(t1, t1, v);
+		mpi_rshift(t1, t1, 1);
+		mpi_rshift(t3, t3, 1);
+	    }
+	  Y4:
+	    ;
+	} while( !mpi_test_bit( t3, 0 ) ); /* while t3 is even */
+
+	if( !t3->sign ) {
+	    mpi_set(u1, t1);
+	    if( !odd )
+		mpi_set(u2, t2);
+	    mpi_set(u3, t3);
+	}
+	else {
+	    mpi_sub(v1, v, t1);
+	    sign = u->sign; u->sign = !u->sign;
+	    if( !odd )
+		mpi_sub(v2, u, t2);
+	    u->sign = sign;
+	    sign = t3->sign; t3->sign = !t3->sign;
+	    mpi_set(v3, t3);
+	    t3->sign = sign;
+	}
+	mpi_sub(t1, u1, v1);
+	if( !odd )
+	    mpi_sub(t2, u2, v2);
+	mpi_sub(t3, u3, v3);
+	if( t1->sign ) {
+	    mpi_add(t1, t1, v);
+	    if( !odd )
+		mpi_sub(t2, t2, u);
+	}
+    } while( mpi_cmp_ui( t3, 0 ) ); /* while t3 != 0 */
+    /* mpi_lshift( u3, k ); */
+    mpi_set(x, u1);
+
+    mpi_free(u1);
+    mpi_free(v1);
+    mpi_free(t1);
+    if( !odd ) {
+	mpi_free(u2);
+	mpi_free(v2);
+	mpi_free(t2);
+    }
+    mpi_free(u3);
+    mpi_free(v3);
+    mpi_free(t3);
+
+    mpi_free(u);
+    mpi_free(v);
+  #endif
+}
+
+
+
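
All three variants of mpi_invm compute x with (a*x) mod n == 1; the live
third branch is a binary extended GCD that avoids most multi-precision
divisions.  The classic division-based form is easier to follow and yields
the same result; a small standalone sketch on native integers, not the MPI
types:

#include <stdio.h>

/* Return x in [0, n) with (a * x) % n == 1, or 0 if gcd(a, n) != 1. */
static long invm(long a, long n)
{
	long u1 = 1, u3 = a, v1 = 0, v3 = n;

	while (v3 != 0) {
		long q = u3 / v3;
		long t1 = u1 - q * v1, t3 = u3 - q * v3;

		u1 = v1; u3 = v3;
		v1 = t1; v3 = t3;
	}
	if (u3 != 1)
		return 0;		/* not invertible */
	return ((u1 % n) + n) % n;	/* normalize into [0, n) */
}

int main(void)
{
	long x = invm(17, 3120);	/* the textbook RSA example pair */

	printf("inverse = %ld, check = %ld\n", x, (17 * x) % 3120);
	return 0;
}
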
diff -Nru a/security/rsa/gnupg_mpi_mpi-mpow.c b/security/rsa/gnupg_mpi_mpi-mpow.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-mpow.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,98 @@
+/* mpi-mpow.c  -  MPI functions
+ * Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+
+static int
+build_index( MPI *exparray, int k, int i, int t )
+{
+    int j, bitno;
+    int index = 0;
+
+    bitno = t-i;
+    for(j=k-1; j >= 0; j-- ) {
+	index <<= 1;
+	if( mpi_test_bit( exparray[j], bitno ) )
+	    index |= 1;
+    }
+    return index;
+}
+
+/****************
+ * RES = ((BASE[0] ^ EXP[0]) * (BASE[1] ^ EXP[1]) * ...) mod M
+ */
+void
+mpi_mulpowm( MPI res, MPI *basearray, MPI *exparray, MPI m)
+{
+    int k;	/* number of elements */
+    int t;	/* bit size of largest exponent */
+    int i, j, idx;
+    MPI *G;	/* table with precomputed values of size 2^k */
+    MPI tmp;
+
+    for(k=0; basearray[k]; k++ )
+	;
+    assert(k);
+    for(t=0, i=0; (tmp=exparray[i]); i++ ) {
+	j = mpi_get_nbits(tmp);
+	if( j > t )
+	    t = j;
+    }
+    assert(i==k);
+    assert(t);
+    assert( k < 10 );
+
+    G = m_alloc_clear( (1<<k) * sizeof *G );
+    /* and calculate */
+    tmp =  mpi_alloc( mpi_get_nlimbs(m)+1 );
+    mpi_set_ui( res, 1 );
+    for(i = 1; i <= t; i++ ) {
+	mpi_mulm(tmp, res, res, m );
+	idx = build_index( exparray, k, i, t );
+	assert( idx >= 0 && idx < (1<<k) );
+	if( !G[idx] ) {
+	    if( !idx )
+		 G[0] = mpi_alloc_set_ui( 1 );
+	    else {
+		for(j=0; j < k; j++ ) {
+		    if( (idx & (1<<j) ) ) {
+			if( !G[idx] )
+			    G[idx] = mpi_copy( basearray[j] );
+			else
+			    mpi_mulm( G[idx], G[idx], basearray[j], m );
+		    }
+		}
+		if( !G[idx] )
+		    G[idx] = mpi_alloc(0);
+	    }
+	}
+	mpi_mulm(res, tmp, G[idx], m );
+    }
+
+    /* cleanup */
+    mpi_free(tmp);
+    for(i=0; i < (1<<k); i++ )
+	mpi_free(G[i]);
+    m_free(G);
+}
+
+
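
mpi_mulpowm walks the exponent bits once, squaring the accumulator at every
step and multiplying in an entry of the 2^k-element table G indexed by one
bit of each exponent (Shamir's trick for simultaneous exponentiation).  The
same idea for fixed k = 2 on native integers -- a sketch that only holds for
operands small enough that the intermediate products fit in 64 bits:

#include <stdio.h>
#include <stdint.h>

/* Compute (b0^e0 * b1^e1) % m in a single left-to-right pass,
 * indexing a 4-entry table by one bit of each exponent per step.
 * Small operands only; real code needs double-width multiplies. */
static uint64_t mulpowm2(uint64_t b0, uint64_t e0,
			 uint64_t b1, uint64_t e1, uint64_t m)
{
	uint64_t g[4] = { 1, b0 % m, b1 % m, (b0 * b1) % m };
	uint64_t res = 1;
	int t;

	for (t = 63; t >= 0; t--) {
		unsigned idx = (((e1 >> t) & 1) << 1) | ((e0 >> t) & 1);

		res = (res * res) % m;
		res = (res * g[idx]) % m;
	}
	return res;
}

int main(void)
{
	/* 3^5 * 5^3 mod 1000 = 243 * 125 mod 1000 = 375 */
	printf("%llu\n", (unsigned long long)mulpowm2(3, 5, 5, 3, 1000));
	return 0;
}
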
diff -Nru a/security/rsa/gnupg_mpi_mpi-mul.c b/security/rsa/gnupg_mpi_mpi-mul.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-mul.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,212 @@
+/* mpi-mul.c  -  MPI functions
+ *	Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ *	Copyright (C) 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+
+
+void
+mpi_mul_ui( MPI prod, MPI mult, unsigned long small_mult )
+{
+    mpi_size_t size, prod_size;
+    mpi_ptr_t  prod_ptr;
+    mpi_limb_t cy;
+    int sign;
+
+    size = mult->nlimbs;
+    sign = mult->sign;
+
+    if( !size || !small_mult ) {
+	prod->nlimbs = 0;
+	prod->sign = 0;
+	return;
+    }
+
+    prod_size = size + 1;
+    if( prod->alloced < prod_size )
+	mpi_resize( prod, prod_size );
+    prod_ptr = prod->d;
+
+    cy = mpihelp_mul_1( prod_ptr, mult->d, size, (mpi_limb_t)small_mult );
+    if( cy )
+	prod_ptr[size++] = cy;
+    prod->nlimbs = size;
+    prod->sign = sign;
+}
+
+
+void
+mpi_mul_2exp( MPI w, MPI u, unsigned long cnt)
+{
+    mpi_size_t usize, wsize, limb_cnt;
+    mpi_ptr_t wp;
+    mpi_limb_t wlimb;
+    int usign, wsign;
+
+    usize = u->nlimbs;
+    usign = u->sign;
+
+    if( !usize ) {
+	w->nlimbs = 0;
+	w->sign = 0;
+	return;
+    }
+
+    limb_cnt = cnt / BITS_PER_MPI_LIMB;
+    wsize = usize + limb_cnt + 1;
+    if( w->alloced < wsize )
+	mpi_resize(w, wsize );
+    wp = w->d;
+    wsize = usize + limb_cnt;
+    wsign = usign;
+
+    cnt %= BITS_PER_MPI_LIMB;
+    if( cnt ) {
+	wlimb = mpihelp_lshift( wp + limb_cnt, u->d, usize, cnt );
+	if( wlimb ) {
+	    wp[wsize] = wlimb;
+	    wsize++;
+	}
+    }
+    else {
+	MPN_COPY_DECR( wp + limb_cnt, u->d, usize );
+    }
+
+    /* Zero all whole limbs at low end.  Do it here and not before calling
+     * mpn_lshift, so as not to lose when U == W.  */
+    MPN_ZERO( wp, limb_cnt );
+
+    w->nlimbs = wsize;
+    w->sign = wsign;
+}
+
+
+
+void
+mpi_mul( MPI w, MPI u, MPI v)
+{
+    mpi_size_t usize, vsize, wsize;
+    mpi_ptr_t up, vp, wp;
+    mpi_limb_t cy;
+    int usign, vsign, usecure, vsecure, sign_product;
+    int assign_wp=0;
+    mpi_ptr_t tmp_limb=NULL;
+
+
+    if( u->nlimbs < v->nlimbs ) { /* Swap U and V. */
+	usize = v->nlimbs;
+	usign = v->sign;
+	usecure = mpi_is_secure(v);
+	up    = v->d;
+	vsize = u->nlimbs;
+	vsign = u->sign;
+	vsecure = mpi_is_secure(u);
+	vp    = u->d;
+    }
+    else {
+	usize = u->nlimbs;
+	usign = u->sign;
+	usecure = mpi_is_secure(u);
+	up    = u->d;
+	vsize = v->nlimbs;
+	vsign = v->sign;
+	vsecure = mpi_is_secure(v);
+	vp    = v->d;
+    }
+    sign_product = usign ^ vsign;
+    wp = w->d;
+
+    /* Ensure W has space enough to store the result.  */
+    wsize = usize + vsize;
+    if ( !mpi_is_secure (w) && (mpi_is_secure (u) || mpi_is_secure (v)) ) {
+        /* w is not allocated in secure space but u or v is.  To make sure
+         * that no temporary results are stored in w, we temporarily use
+         * a newly allocated limb space for w */
+        wp = mpi_alloc_limb_space( wsize, 1 );
+        assign_wp = 2; /* mark it as 2 so that we can later copy it back to
+                        * normal memory */
+    }
+    else if( w->alloced < wsize ) {
+	if( wp == up || wp == vp ) {
+	    wp = mpi_alloc_limb_space( wsize, mpi_is_secure(w) );
+	    assign_wp = 1;
+	}
+	else {
+	    mpi_resize(w, wsize );
+	    wp = w->d;
+	}
+    }
+    else { /* Make U and V not overlap with W.	*/
+	if( wp == up ) {
+	    /* W and U are identical.  Allocate temporary space for U.	*/
+	    up = tmp_limb = mpi_alloc_limb_space( usize, usecure  );
+	    /* Is V identical too?  Keep it identical with U.  */
+	    if( wp == vp )
+		vp = up;
+	    /* Copy to the temporary space.  */
+	    MPN_COPY( up, wp, usize );
+	}
+	else if( wp == vp ) {
+	    /* W and V are identical.  Allocate temporary space for V.	*/
+	    vp = tmp_limb = mpi_alloc_limb_space( vsize, vsecure );
+	    /* Copy to the temporary space.  */
+	    MPN_COPY( vp, wp, vsize );
+	}
+    }
+
+    if( !vsize )
+	wsize = 0;
+    else {
+	cy = mpihelp_mul( wp, up, usize, vp, vsize );
+	wsize -= cy? 0:1;
+    }
+
+    if( assign_wp ) {
+        if (assign_wp == 2) {
+            /* copy the temp wp from secure memory back to normal memory */
+	    mpi_ptr_t tmp_wp = mpi_alloc_limb_space (wsize, 0);
+	    MPN_COPY (tmp_wp, wp, wsize);
+            mpi_free_limb_space (wp);
+            wp = tmp_wp;
+        }
+	mpi_assign_limb_space( w, wp, wsize );
+    }
+    w->nlimbs = wsize;
+    w->sign = sign_product;
+    if( tmp_limb )
+	mpi_free_limb_space( tmp_limb );
+}
+
+
+void
+mpi_mulm( MPI w, MPI u, MPI v, MPI m)
+{
+    mpi_mul(w, u, v);
+    mpi_fdiv_r( w, w, m );
+}
+
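
mpi_mul delegates to mpihelp_mul, which is schoolbook O(n*m) limb
multiplication below KARATSUBA_THRESHOLD and the Karatsuba recursion above
it.  The schoolbook core, on 32-bit limbs with a 64-bit accumulator, as a
standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Schoolbook multiply: w[0..un+vn-1] = u[0..un-1] * v[0..vn-1],
 * limbs stored least significant first.  The accumulator never
 * overflows: the largest term is (2^32-1)^2 + 2*(2^32-1) = 2^64 - 1. */
static void mul_basecase(uint32_t *w, const uint32_t *u, int un,
			 const uint32_t *v, int vn)
{
	int i, j;

	for (i = 0; i < un + vn; i++)
		w[i] = 0;
	for (i = 0; i < un; i++) {
		uint64_t carry = 0;

		for (j = 0; j < vn; j++) {
			uint64_t t = (uint64_t)u[i] * v[j] + w[i + j] + carry;

			w[i + j] = (uint32_t)t;
			carry = t >> 32;
		}
		w[i + vn] = (uint32_t)carry;
	}
}

int main(void)
{
	uint32_t u[2] = { 0xffffffff, 0xffffffff };	/* 2^64 - 1 */
	uint32_t v[1] = { 2 };
	uint32_t w[3];

	mul_basecase(w, u, 2, v, 1);
	/* (2^64 - 1) * 2 = 2^65 - 2: prints 00000001 ffffffff fffffffe */
	printf("%08x %08x %08x\n", w[2], w[1], w[0]);
	return 0;
}
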
diff -Nru a/security/rsa/gnupg_mpi_mpi-pow.c b/security/rsa/gnupg_mpi_mpi-pow.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-pow.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,290 @@
+/* mpi-pow.c  -  MPI functions
+ *	Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include <linux/string.h>
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+
+/****************
+ * RES = BASE ^ EXP mod MOD
+ */
+void
+mpi_powm( MPI res, MPI base, MPI exp, MPI mod)
+{
+    mpi_ptr_t  rp, ep, mp, bp;
+    mpi_size_t esize, msize, bsize, rsize;
+    int        esign, msign, bsign, rsign;
+    int        esec,  msec,  bsec,  rsec;
+    mpi_size_t size;
+    int mod_shift_cnt;
+    int negative_result;
+    mpi_ptr_t mp_marker=NULL, bp_marker=NULL, ep_marker=NULL;
+    mpi_ptr_t xp_marker=NULL;
+    int assign_rp=0;
+    mpi_ptr_t tspace = NULL;
+    mpi_size_t tsize=0;   /* to avoid compiler warning */
+			  /* fixme: we should check that the warning is gone now */
+
+    esize = exp->nlimbs;
+    msize = mod->nlimbs;
+    size = 2 * msize;
+    esign = exp->sign;
+    msign = mod->sign;
+
+    esec = mpi_is_secure(exp);
+    msec = mpi_is_secure(mod);
+    bsec = mpi_is_secure(base);
+    rsec = mpi_is_secure(res);
+
+    rp = res->d;
+    ep = exp->d;
+
+    if( !msize )
+	msize = 1 / msize;	    /* provoke a signal */
+
+    if( !esize ) {
+	/* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
+	 * depending on if MOD equals 1.  */
+	rp[0] = 1;
+	res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
+	res->sign = 0;
+	goto leave;
+    }
+
+    /* Normalize MOD (i.e. make its most significant bit set) as required by
+     * mpn_divrem.  This will make the intermediate values in the calculation
+     * slightly larger, but the correct result is obtained after a final
+     * reduction using the original MOD value.	*/
+    mp = mp_marker = mpi_alloc_limb_space(msize, msec);
+    count_leading_zeros( mod_shift_cnt, mod->d[msize-1] );
+    if( mod_shift_cnt )
+	mpihelp_lshift( mp, mod->d, msize, mod_shift_cnt );
+    else
+	MPN_COPY( mp, mod->d, msize );
+
+    bsize = base->nlimbs;
+    bsign = base->sign;
+    if( bsize > msize ) { /* The base is larger than the module. Reduce it. */
+	/* Allocate (BSIZE + 1) with space for remainder and quotient.
+	 * (The quotient is (bsize - msize + 1) limbs.)  */
+	bp = bp_marker = mpi_alloc_limb_space( bsize + 1, bsec );
+	MPN_COPY( bp, base->d, bsize );
+	/* We don't care about the quotient, store it above the remainder,
+	 * at BP + MSIZE.  */
+	mpihelp_divrem( bp + msize, 0, bp, bsize, mp, msize );
+	bsize = msize;
+	/* Canonicalize the base, since we are going to multiply with it
+	 * quite a few times.  */
+	MPN_NORMALIZE( bp, bsize );
+    }
+    else
+	bp = base->d;
+
+    if( !bsize ) {
+	res->nlimbs = 0;
+	res->sign = 0;
+	goto leave;
+    }
+
+    if( res->alloced < size ) {
+	/* We have to allocate more space for RES.  If any of the input
+	 * parameters are identical to RES, defer deallocation of the old
+	 * space.  */
+	if( rp == ep || rp == mp || rp == bp ) {
+	    rp = mpi_alloc_limb_space( size, rsec );
+	    assign_rp = 1;
+	}
+	else {
+	    mpi_resize( res, size );
+	    rp = res->d;
+	}
+    }
+    else { /* Make BASE, EXP and MOD not overlap with RES.  */
+	if( rp == bp ) {
+	    /* RES and BASE are identical.  Allocate temp. space for BASE.  */
+	    assert( !bp_marker );
+	    bp = bp_marker = mpi_alloc_limb_space( bsize, bsec );
+	    MPN_COPY(bp, rp, bsize);
+	}
+	if( rp == ep ) {
+	    /* RES and EXP are identical.  Allocate temp. space for EXP.  */
+	    ep = ep_marker = mpi_alloc_limb_space( esize, esec );
+	    MPN_COPY(ep, rp, esize);
+	}
+	if( rp == mp ) {
+	    /* RES and MOD are identical.  Allocate temporary space for MOD.*/
+	    assert( !mp_marker );
+	    mp = mp_marker = mpi_alloc_limb_space( msize, msec );
+	    MPN_COPY(mp, rp, msize);
+	}
+    }
+
+    MPN_COPY( rp, bp, bsize );
+    rsize = bsize;
+    rsign = bsign;
+
+    {
+	mpi_size_t i;
+	mpi_ptr_t xp = xp_marker = mpi_alloc_limb_space( 2 * (msize + 1), msec );
+	int c;
+	mpi_limb_t e;
+	mpi_limb_t carry_limb;
+	struct karatsuba_ctx karactx;
+
+	memset( &karactx, 0, sizeof karactx );
+	negative_result = (ep[0] & 1) && base->sign;
+
+	i = esize - 1;
+	e = ep[i];
+	count_leading_zeros (c, e);
+	e = (e << c) << 1;     /* shift the exp bits to the left, lose msb */
+	c = BITS_PER_MPI_LIMB - 1 - c;
+
+	/* Main loop.
+	 *
+	 * Make the result be pointed to alternately by XP and RP.  This
+	 * helps us avoid block copying, which would otherwise be necessary
+	 * with the overlap restrictions of mpihelp_divrem.  With 50% probability
+	 * the result after this loop will be in the area originally pointed
+	 * to by RP (==RES->d), and with 50% probability in the area originally
+	 * pointed to by XP.
+	 */
+
+	for(;;) {
+	    while( c ) {
+		mpi_ptr_t tp;
+		mpi_size_t xsize;
+
+		/*mpihelp_mul_n(xp, rp, rp, rsize);*/
+		if( rsize < KARATSUBA_THRESHOLD )
+		    mpih_sqr_n_basecase( xp, rp, rsize );
+		else {
+		    if( !tspace ) {
+			tsize = 2 * rsize;
+			tspace = mpi_alloc_limb_space( tsize, 0 );
+		    }
+		    else if( tsize < (2*rsize) ) {
+			mpi_free_limb_space( tspace );
+			tsize = 2 * rsize;
+			tspace = mpi_alloc_limb_space( tsize, 0 );
+		    }
+		    mpih_sqr_n( xp, rp, rsize, tspace );
+		}
+
+		xsize = 2 * rsize;
+		if( xsize > msize ) {
+		    mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize);
+		    xsize = msize;
+		}
+
+		tp = rp; rp = xp; xp = tp;
+		rsize = xsize;
+
+		if( (mpi_limb_signed_t)e < 0 ) {
+		    /*mpihelp_mul( xp, rp, rsize, bp, bsize );*/
+		    if( bsize < KARATSUBA_THRESHOLD ) {
+			mpihelp_mul( xp, rp, rsize, bp, bsize );
+		    }
+		    else {
+			mpihelp_mul_karatsuba_case(
+				     xp, rp, rsize, bp, bsize, &karactx );
+		    }
+
+		    xsize = rsize + bsize;
+		    if( xsize > msize ) {
+			mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize);
+			xsize = msize;
+		    }
+
+		    tp = rp; rp = xp; xp = tp;
+		    rsize = xsize;
+		}
+		e <<= 1;
+		c--;
+	    }
+
+	    i--;
+	    if( i < 0 )
+		break;
+	    e = ep[i];
+	    c = BITS_PER_MPI_LIMB;
+	}
+
+	/* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT
+	 * steps.  Adjust the result by reducing it with the original MOD.
+	 *
+	 * Also make sure the result is put in RES->d (where it already
+	 * might be, see above).
+	 */
+	if( mod_shift_cnt ) {
+	    carry_limb = mpihelp_lshift( res->d, rp, rsize, mod_shift_cnt);
+	    rp = res->d;
+	    if( carry_limb ) {
+		rp[rsize] = carry_limb;
+		rsize++;
+	    }
+	}
+	else {
+	    MPN_COPY( res->d, rp, rsize);
+	    rp = res->d;
+	}
+
+	if( rsize >= msize ) {
+	    mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize);
+	    rsize = msize;
+	}
+
+	/* Remove any leading zero words from the result.  */
+	if( mod_shift_cnt )
+	    mpihelp_rshift( rp, rp, rsize, mod_shift_cnt);
+	MPN_NORMALIZE (rp, rsize);
+
+	mpihelp_release_karatsuba_ctx( &karactx );
+    }
+
+    if( negative_result && rsize ) {
+	if( mod_shift_cnt )
+	    mpihelp_rshift( mp, mp, msize, mod_shift_cnt);
+	mpihelp_sub( rp, mp, msize, rp, rsize);
+	rsize = msize;
+	rsign = msign;
+	MPN_NORMALIZE(rp, rsize);
+    }
+    res->nlimbs = rsize;
+    res->sign = rsign;
+
+  leave:
+    if( assign_rp ) mpi_assign_limb_space( res, rp, size );
+    if( mp_marker ) mpi_free_limb_space( mp_marker );
+    if( bp_marker ) mpi_free_limb_space( bp_marker );
+    if( ep_marker ) mpi_free_limb_space( ep_marker );
+    if( xp_marker ) mpi_free_limb_space( xp_marker );
+    if( tspace )    mpi_free_limb_space( tspace );
+}
+
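
Stripped of the limb machinery, the aliasing cases and the Karatsuba path,
mpi_powm is left-to-right binary exponentiation: square once per exponent
bit, multiply by the base when the bit is set, reduce after every step.  A
sketch of that control flow on native integers (operands must stay small
enough that (m-1)^2 fits in 64 bits):

#include <stdio.h>
#include <stdint.h>

/* Left-to-right binary exponentiation: base^exp mod m. */
static uint64_t powm(uint64_t base, uint64_t exp, uint64_t m)
{
	uint64_t res = 1;
	int t;

	base %= m;
	for (t = 63; t >= 0; t--) {
		res = (res * res) % m;		/* square every step    */
		if ((exp >> t) & 1)
			res = (res * base) % m;	/* multiply on set bits */
	}
	return res;
}

int main(void)
{
	/* 2^10 mod 1000 = 24 */
	printf("%llu\n", (unsigned long long)powm(2, 10, 1000));
	return 0;
}
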
diff -Nru a/security/rsa/gnupg_mpi_mpi-scan.c b/security/rsa/gnupg_mpi_mpi-scan.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi-scan.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,129 @@
+/* mpi-scan.c  -  MPI functions
+ * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+/****************
+ * Scan through an MPI and return it byte by byte; -1 is returned to indicate
+ * the end of the MPI.  Scanning is done from the LSB to the MSB; returned
+ * values are in the range 0 .. 255.
+ *
+ * FIXME: This code is VERY ugly!
+ */
+int
+mpi_getbyte( MPI a, unsigned idx )
+{
+    int i, j;
+    unsigned n;
+    mpi_ptr_t ap;
+    mpi_limb_t limb;
+
+    ap = a->d;
+    for(n=0,i=0; i < a->nlimbs; i++ ) {
+	limb = ap[i];
+	for( j=0; j < BYTES_PER_MPI_LIMB; j++, n++ )
+	    if( n == idx )
+		return (limb >> j*8) & 0xff;
+    }
+    return -1;
+}
+
+
+/****************
+ * Put a value at position IDX into A. idx counts from lsb to msb
+ */
+void
+mpi_putbyte( MPI a, unsigned idx, int xc )
+{
+    int i, j;
+    unsigned n;
+    mpi_ptr_t ap;
+    mpi_limb_t limb, c;
+
+    c = xc & 0xff;
+    ap = a->d;
+    for(n=0,i=0; i < a->alloced; i++ ) {
+	limb = ap[i];
+	for( j=0; j < BYTES_PER_MPI_LIMB; j++, n++ )
+	    if( n == idx ) {
+	      #if BYTES_PER_MPI_LIMB == 4
+		if( j == 0 )
+		    limb = (limb & 0xffffff00) | c;
+		else if( j == 1 )
+		    limb = (limb & 0xffff00ff) | (c<<8);
+		else if( j == 2 )
+		    limb = (limb & 0xff00ffff) | (c<<16);
+		else
+		    limb = (limb & 0x00ffffff) | (c<<24);
+	      #elif BYTES_PER_MPI_LIMB == 8
+		if( j == 0 )
+		    limb = (limb & 0xffffffffffffff00) | c;
+		else if( j == 1 )
+		    limb = (limb & 0xffffffffffff00ff) | (c<<8);
+		else if( j == 2 )
+		    limb = (limb & 0xffffffffff00ffff) | (c<<16);
+		else if( j == 3 )
+		    limb = (limb & 0xffffffff00ffffff) | (c<<24);
+		else if( j == 4 )
+		    limb = (limb & 0xffffff00ffffffff) | (c<<32);
+		else if( j == 5 )
+		    limb = (limb & 0xffff00ffffffffff) | (c<<40);
+		else if( j == 6 )
+		    limb = (limb & 0xff00ffffffffffff) | (c<<48);
+		else
+		    limb = (limb & 0x00ffffffffffffff) | (c<<56);
+	      #else
+		 #error please enhance this function, it is ugly - i know.
+	      #endif
+		if( a->nlimbs <= i )
+		    a->nlimbs = i+1;
+		ap[i] = limb;
+		return;
+	    }
+    }
+    log_bug("index out of range\n");
+}
+
+
+/****************
+ * Count the number of zerobits at the low end of A
+ */
+unsigned
+mpi_trailing_zeros( MPI a )
+{
+    unsigned n, count = 0;
+
+    for(n=0; n < a->nlimbs; n++ ) {
+	if( a->d[n] ) {
+	    unsigned nn;
+	    mpi_limb_t alimb = a->d[n];
+
+	    count_trailing_zeros( nn, alimb );
+	    count += nn;
+	    break;
+	}
+	count += BITS_PER_MPI_LIMB;
+    }
+    return count;
+
+}
+
+
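
mpi_trailing_zeros adds BITS_PER_MPI_LIMB for every all-zero low limb and
then counts inside the first non-zero limb, where count_trailing_zeros
normally maps to a single machine instruction.  The same logic with an
explicit bit loop, as a standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Count zero bits at the low end of an n-limb number (limbs stored
 * least significant first). */
static unsigned trailing_zeros(const uint32_t *d, int n)
{
	unsigned count = 0;
	int i;

	for (i = 0; i < n; i++) {
		uint32_t limb = d[i];

		if (limb) {
			while (!(limb & 1)) {	/* scan the low limb */
				limb >>= 1;
				count++;
			}
			return count;
		}
		count += 32;			/* whole limb of zeros */
	}
	return count;
}

int main(void)
{
	uint32_t a[2] = { 0x00000000, 0x00000100 };	/* 2^40 */

	printf("%u\n", trailing_zeros(a, 2));		/* prints 40 */
	return 0;
}
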
diff -Nru a/security/rsa/gnupg_mpi_mpi.h b/security/rsa/gnupg_mpi_mpi.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpi.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,185 @@
+/* mpi.h  -  Multi Precision Integers
+ *	Copyright (C) 1994, 1996, 1998, 1999,
+ *                    2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GNUPG.
+ *
+ * GNUPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GNUPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#ifndef G10_MPI_H
+#define G10_MPI_H
+
+#include "gnupg_mpi_config.h"
+#include "gnupg_mpi_types.h"
+#include "gnupg_mpi_memory.h" 
+
+/* DSI defines */
+
+#define SHA1_DIGEST_LENGTH   20
+
+/*end of DSI defines */
+
+#define log_debug printk
+#define log_bug printk
+
+#define assert(x) do { \
+               if (!(x)) log_bug("failed assertion\n"); \
+	       } while(0)
+
+#if BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_INT
+  typedef unsigned int mpi_limb_t;
+  typedef   signed int mpi_limb_signed_t;
+#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG
+  typedef unsigned long int mpi_limb_t;
+  typedef   signed long int mpi_limb_signed_t;
+#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG_LONG
+  typedef unsigned long long int mpi_limb_t;
+  typedef   signed long long int mpi_limb_signed_t;
+#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_SHORT
+  typedef unsigned short int mpi_limb_t;
+  typedef   signed short int mpi_limb_signed_t;
+#else
+  #error BYTES_PER_MPI_LIMB does not match any C type
+#endif
+#define BITS_PER_MPI_LIMB    (8*BYTES_PER_MPI_LIMB)
+
+#ifndef EXTERN_UNLESS_MAIN_MODULE
+ #if defined (__riscos__) && !defined (INCLUDED_BY_MAIN_MODULE)
+  #define EXTERN_UNLESS_MAIN_MODULE extern
+ #else
+  #define EXTERN_UNLESS_MAIN_MODULE 
+ #endif
+#endif
+
+#define DBG_MPI     mpi_debug_mode
+EXTERN_UNLESS_MAIN_MODULE int mpi_debug_mode;
+
+
+struct gcry_mpi {
+    int alloced;    /* array size (# of allocated limbs) */
+    int nlimbs;     /* number of valid limbs */
+    int nbits;	    /* the real number of valid bits (info only) */
+    int sign;	    /* indicates a negative number */
+    unsigned flags; /* bit 0: array must be allocated in secure memory space */
+		    /* bit 1: not used */
+		    /* bit 2: the limb is a pointer to some m_alloced data */
+    mpi_limb_t *d;  /* array with the limbs */
+};
+
+typedef struct gcry_mpi *MPI;
+
+#define MPI_NULL NULL
+
+#define mpi_get_nlimbs(a)     ((a)->nlimbs)
+#define mpi_is_neg(a)	      ((a)->sign)
+
+/*-- mpiutil.c --*/
+
+MPI mpi_alloc( unsigned nlimbs );
+MPI mpi_alloc_secure( unsigned nlimbs );
+MPI mpi_alloc_like( MPI a );
+void mpi_free( MPI a );
+void mpi_resize( MPI a, unsigned nlimbs );
+MPI  mpi_copy( MPI a );
+#define mpi_is_opaque(a) ((a) && ((a)->flags&4))
+MPI mpi_set_opaque( MPI a, void *p, int len );
+void *mpi_get_opaque( MPI a, int *len );
+#define mpi_is_secure(a) ((a) && ((a)->flags&1))
+void mpi_set_secure( MPI a );
+void mpi_clear( MPI a );
+void mpi_set( MPI w, MPI u);
+void mpi_set_ui( MPI w, ulong u);
+MPI  mpi_alloc_set_ui( unsigned long u);
+void mpi_m_check( MPI a );
+void mpi_swap( MPI a, MPI b);
+
+/*-- mpicoder.c --*/
+MPI do_encode_md(const byte *sha_buffer, unsigned nbits);
+MPI mpi_read_from_buffer(byte *buffer, unsigned *ret_nread, int secure);
+int mpi_fromstr(MPI val, const char *str);
+u32 mpi_get_keyid( MPI a, u32 *keyid );
+byte *mpi_get_buffer( MPI a, unsigned *nbytes, int *sign );
+byte *mpi_get_secure_buffer( MPI a, unsigned *nbytes, int *sign );
+void  mpi_set_buffer( MPI a, const byte *buffer, unsigned nbytes, int sign );
+
+#define log_mpidump g10_log_mpidump
+
+/*-- mpi-add.c --*/
+void mpi_add_ui(MPI w, MPI u, ulong v );
+void mpi_add(MPI w, MPI u, MPI v);
+void mpi_addm(MPI w, MPI u, MPI v, MPI m);
+void mpi_sub_ui(MPI w, MPI u, ulong v );
+void mpi_sub( MPI w, MPI u, MPI v);
+void mpi_subm( MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-mul.c --*/
+void mpi_mul_ui(MPI w, MPI u, ulong v );
+void mpi_mul_2exp( MPI w, MPI u, ulong cnt);
+void mpi_mul( MPI w, MPI u, MPI v);
+void mpi_mulm( MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-div.c --*/
+ulong mpi_fdiv_r_ui( MPI rem, MPI dividend, ulong divisor );
+void  mpi_fdiv_r( MPI rem, MPI dividend, MPI divisor );
+void  mpi_fdiv_q( MPI quot, MPI dividend, MPI divisor );
+void  mpi_fdiv_qr( MPI quot, MPI rem, MPI dividend, MPI divisor );
+void  mpi_tdiv_r( MPI rem, MPI num, MPI den);
+void  mpi_tdiv_qr( MPI quot, MPI rem, MPI num, MPI den);
+void  mpi_tdiv_q_2exp( MPI w, MPI u, unsigned count );
+int   mpi_divisible_ui(MPI dividend, ulong divisor );
+
+/*-- mpi-gcd.c --*/
+int mpi_gcd( MPI g, MPI a, MPI b );
+
+/*-- mpi-pow.c --*/
+void mpi_pow( MPI w, MPI u, MPI v);
+void mpi_powm( MPI res, MPI base, MPI exp, MPI mod);
+
+/*-- mpi-mpow.c --*/
+void mpi_mulpowm( MPI res, MPI *basearray, MPI *exparray, MPI mod);
+
+/*-- mpi-cmp.c --*/
+int mpi_cmp_ui( MPI u, ulong v );
+int mpi_cmp( MPI u, MPI v );
+
+/*-- mpi-scan.c --*/
+int mpi_getbyte( MPI a, unsigned idx );
+void mpi_putbyte( MPI a, unsigned idx, int value );
+unsigned mpi_trailing_zeros( MPI a );
+
+/*-- mpi-bit.c --*/
+void mpi_normalize( MPI a );
+unsigned mpi_get_nbits( MPI a );
+int  mpi_test_bit( MPI a, unsigned n );
+void mpi_set_bit( MPI a, unsigned n );
+void mpi_set_highbit( MPI a, unsigned n );
+void mpi_clear_highbit( MPI a, unsigned n );
+void mpi_clear_bit( MPI a, unsigned n );
+void mpi_rshift( MPI x, MPI a, unsigned n );
+
+/*-- mpi-inv.c --*/
+void mpi_invm( MPI x, MPI u, MPI v );
+
+
+#endif /*G10_MPI_H*/
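
This header is the entire surface the signature check needs: read the
signature MPI out of the module section, exponentiate it with the public
key, and compare against the re-encoded digest.  A plausible shape for the
rsa_check_sig() that module_check_sig() calls -- a hedged sketch only, since
the real function lives elsewhere in this patch, and the key MPIs rsa_n and
rsa_e as well as the buffer handling are assumptions:

/* Hedged sketch of an RSA signature check in terms of the API above.
 * rsa_n/rsa_e (the public key) are assumed to be set up elsewhere;
 * only functions declared in gnupg_mpi_mpi.h are used. */
static int check_sig_sketch(byte *sigbuf, unsigned siglen, const byte *sha1)
{
	extern MPI rsa_n, rsa_e;	/* assumed public-key MPIs */
	unsigned nread = siglen;	/* in: buffer length */
	MPI sig, result, expected;
	int ok = 0;

	sig = mpi_read_from_buffer(sigbuf, &nread, 0);
	if (!sig)
		return 0;

	result = mpi_alloc(mpi_get_nlimbs(rsa_n));
	mpi_powm(result, sig, rsa_e, rsa_n);	/* sig^e mod n */

	/* PKCS#1 v1.5: the recovered value must equal the padded,
	 * ASN.1-framed SHA-1 digest. */
	expected = do_encode_md(sha1, mpi_get_nbits(rsa_n));
	if (expected) {
		ok = mpi_cmp(result, expected) == 0;
		mpi_free(expected);
	}
	mpi_free(result);
	mpi_free(sig);
	return ok;
}
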
diff -Nru a/security/rsa/gnupg_mpi_mpicoder.c b/security/rsa/gnupg_mpi_mpicoder.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpicoder.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,363 @@
+/* mpicoder.c  -  Coder for the external representation of MPIs
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include <linux/string.h>
+#include "gnupg_mpi_mpi.h"
+#include "gnupg_mpi_mpi-internal.h"
+
+#define DIM(v) (sizeof(v)/sizeof((v)[0]))
+#define MAX_EXTERN_MPI_BITS 16384
+
+
+static byte asn[15] = /* Object ID is 1.3.14.3.2.26 */
+  { 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03,
+    0x02, 0x1a, 0x05, 0x00, 0x04, 0x14 };
+ 
+
+
+MPI
+do_encode_md(const byte *sha_buffer, unsigned nbits)
+{
+  int nframe = (nbits+7) / 8;
+  byte *frame, *fr_pt;
+  int i = 0, n;
+  size_t asnlen = DIM(asn);
+  MPI a = MPI_NULL;
+
+  if(SHA1_DIGEST_LENGTH + asnlen + 4  > nframe )
+    log_bug("can't encode a %d bit MD into a %d bit frame\n",
+	    (int)(SHA1_DIGEST_LENGTH*8), (int)nbits);
+
+  /* We encode the MD in this way:
+   *
+   *	   0  1  PAD(n bytes)	0  ASN(asnlen bytes)  MD(len bytes)
+   *
+   * PAD consists of FF bytes.
+   */
+  frame = (byte*)m_alloc(nframe);
+  n = 0;
+  frame[n++] = 0;
+  frame[n++] = 1; /* block type */
+  i = nframe - SHA1_DIGEST_LENGTH - asnlen - 3;
+  
+  if(i <= 1) {
+    log_bug("message digest encoding failed\n");
+    m_free(frame);
+    return a;
+  }
+
+  memset( frame+n, 0xff, i ); n += i;
+  frame[n++] = 0;
+  memcpy( frame+n, &asn, asnlen ); n += asnlen;
+  memcpy( frame+n, sha_buffer, SHA1_DIGEST_LENGTH ); n += SHA1_DIGEST_LENGTH;
+  
+  i = nframe;
+  fr_pt = frame;
+
+  if (n != nframe) {
+    log_bug("message digest encoding failed, frame length is wrong\n");
+    m_free(frame);
+    return a;
+  }
+  
+  a = mpi_alloc( (nframe+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB );
+  mpi_set_buffer( a, frame, nframe, 0 );
+  m_free(frame);
+
+  return a;
+}
+
+
+MPI
+mpi_read_from_buffer(byte *buffer, unsigned *ret_nread, int secure)
+{
+  int i, j;
+  unsigned nbits, nbytes, nlimbs, nread=0;
+  mpi_limb_t a;
+  MPI val = MPI_NULL;
+
+  if( *ret_nread < 2 )
+    goto leave;
+  nbits = buffer[0] << 8 | buffer[1];
+
+  if( nbits > MAX_EXTERN_MPI_BITS ) {
+    log_bug("mpi too large (%u bits)\n", nbits);
+    goto leave;
+  }
+  buffer += 2;
+  nread = 2;
+
+  nbytes = (nbits+7) / 8;
+  nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB;
+  val = secure? mpi_alloc_secure( nlimbs )
+    : mpi_alloc( nlimbs );
+  i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+  i %= BYTES_PER_MPI_LIMB;
+  val->nbits = nbits;
+  j= val->nlimbs = nlimbs;
+  val->sign = 0;
+  for( ; j > 0; j-- ) {
+    a = 0;
+    for(; i < BYTES_PER_MPI_LIMB; i++ ) {
+      if( ++nread > *ret_nread ) {
+	log_bug("mpi larger than buffer nread=%d ret_nread=%d\n", nread, *ret_nread);
+	goto leave;
+      }
+      a <<= 8;
+      a |= *buffer++;
+    }
+    i = 0;
+    val->d[j-1] = a;
+  }
+
+ leave:
+  *ret_nread = nread;
+  return val;
+}
+
+
+/****************
+ * Make an mpi from a character string.
+ */
+int
+mpi_fromstr(MPI val, const char *str)
+{
+    int hexmode=0, sign=0, prepend_zero=0, i, j, c, c1, c2;
+    unsigned nbits, nbytes, nlimbs;
+    mpi_limb_t a;
+
+    if( *str == '-' ) {
+	sign = 1;
+	str++;
+    }
+    if( *str == '0' && str[1] == 'x' )
+	hexmode = 1;
+    else
+	return 1; /* other bases are not yet supported */
+    str += 2;
+
+    nbits = strlen(str)*4;
+    if( nbits % 8 )
+	prepend_zero = 1;
+    nbytes = (nbits+7) / 8;
+    nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB;
+    if( val->alloced < nlimbs )
+	mpi_resize(val, nlimbs );
+    i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+    i %= BYTES_PER_MPI_LIMB;
+    j= val->nlimbs = nlimbs;
+    val->sign = sign;
+    for( ; j > 0; j-- ) {
+	a = 0;
+	for(; i < BYTES_PER_MPI_LIMB; i++ ) {
+	    if( prepend_zero ) {
+		c1 = '0';
+		prepend_zero = 0;
+	    }
+	    else
+		c1 = *str++;
+	    assert(c1);
+	    c2 = *str++;
+	    assert(c2);
+	    if( c1 >= '0' && c1 <= '9' )
+		c = c1 - '0';
+	    else if( c1 >= 'a' && c1 <= 'f' )
+		c = c1 - 'a' + 10;
+	    else if( c1 >= 'A' && c1 <= 'F' )
+		c = c1 - 'A' + 10;
+	    else {
+		mpi_clear(val);
+		return 1;
+	    }
+	    c <<= 4;
+	    if( c2 >= '0' && c2 <= '9' )
+		c |= c2 - '0';
+	    else if( c2 >= 'a' && c2 <= 'f' )
+		c |= c2 - 'a' + 10;
+	    else if( c2 >= 'A' && c2 <= 'F' )
+		c |= c2 - 'A' + 10;
+	    else {
+		mpi_clear(val);
+		return 1;
+	    }
+	    a <<= 8;
+	    a |= c;
+	}
+	i = 0;
+
+	val->d[j-1] = a;
+    }
+
+    return 0;
+}
+
+
+/****************
+ * Special function to get the low 8 bytes from an mpi.
+ * This can be used as a keyid; KEYID is a 2-element array.
+ * Return the low 4 bytes.
+ */
+u32
+mpi_get_keyid( MPI a, u32 *keyid )
+{
+#if BYTES_PER_MPI_LIMB == 4
+    if( keyid ) {
+	keyid[0] = a->nlimbs >= 2? a->d[1] : 0;
+	keyid[1] = a->nlimbs >= 1? a->d[0] : 0;
+    }
+    return a->nlimbs >= 1? a->d[0] : 0;
+#elif BYTES_PER_MPI_LIMB == 8
+    if( keyid ) {
+	keyid[0] = a->nlimbs? (u32)(a->d[0] >> 32) : 0;
+	keyid[1] = a->nlimbs? (u32)(a->d[0] & 0xffffffff) : 0;
+    }
+    return a->nlimbs? (u32)(a->d[0] & 0xffffffff) : 0;
+#else
+  #error Make this function work with other LIMB sizes
+#endif
+}
+
+
+/****************
+ * Return an m_alloced buffer with the MPI (msb first).
+ * NBYTES receives the length of this buffer.  The caller must free the
+ * returned buffer.  (This function does return a 0-byte buffer with NBYTES
+ * set to zero if the value of A is zero.)  If SIGN is not NULL, it will
+ * be set to the sign of A.
+ */
+static byte *
+do_get_buffer( MPI a, unsigned *nbytes, int *sign, int force_secure )
+{
+    byte *p, *buffer;
+    mpi_limb_t alimb;
+    int i;
+    unsigned int n;
+
+    if( sign )
+	*sign = a->sign;
+    *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB;
+    if (!n)
+      n++; /* avoid zero length allocation */
+    p = buffer = force_secure || mpi_is_secure(a) ? m_alloc_secure(n)
+						  : m_alloc(n);
+
+    for(i=a->nlimbs-1; i >= 0; i-- ) {
+	alimb = a->d[i];
+#if BYTES_PER_MPI_LIMB == 4
+	*p++ = alimb >> 24;
+	*p++ = alimb >> 16;
+	*p++ = alimb >>  8;
+	*p++ = alimb	  ;
+#elif BYTES_PER_MPI_LIMB == 8
+	*p++ = alimb >> 56;
+	*p++ = alimb >> 48;
+	*p++ = alimb >> 40;
+	*p++ = alimb >> 32;
+	*p++ = alimb >> 24;
+	*p++ = alimb >> 16;
+	*p++ = alimb >>  8;
+	*p++ = alimb	  ;
+#else
+#error please implement for this limb size.
+#endif
+    }
+
+    /* this is sub-optimal but we need to do the shift operation
+     * because the caller has to free the returned buffer */
+    for(p=buffer; !*p && *nbytes; p++, --*nbytes )
+      ;
+    if( p != buffer )
+      memmove(buffer,p, *nbytes);
+
+    return buffer;
+}
+
+
+byte *
+mpi_get_buffer( MPI a, unsigned *nbytes, int *sign )
+{
+    return do_get_buffer( a, nbytes, sign, 0 );
+}
+
+byte *
+mpi_get_secure_buffer( MPI a, unsigned *nbytes, int *sign )
+{
+    return do_get_buffer( a, nbytes, sign, 1 );
+}
+
+/****************
+ * Use BUFFER to update MPI.
+ */
+void
+mpi_set_buffer( MPI a, const byte *buffer, unsigned nbytes, int sign )
+{
+    const byte *p;
+    mpi_limb_t alimb;
+    int nlimbs;
+    int i;
+
+    nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+    RESIZE_IF_NEEDED(a, nlimbs);
+    a->sign = sign;
+
+    for(i=0, p = buffer+nbytes-1; p >= buffer+BYTES_PER_MPI_LIMB; ) {
+      #if BYTES_PER_MPI_LIMB == 4
+	alimb  = (mpi_limb_t)*p-- ;
+	alimb |= (mpi_limb_t)*p-- <<  8 ;
+	alimb |= (mpi_limb_t)*p-- << 16 ;
+	alimb |= (mpi_limb_t)*p-- << 24 ;
+      #elif BYTES_PER_MPI_LIMB == 8
+	alimb  = (mpi_limb_t)*p--	;
+	alimb |= (mpi_limb_t)*p-- <<  8 ;
+	alimb |= (mpi_limb_t)*p-- << 16 ;
+	alimb |= (mpi_limb_t)*p-- << 24 ;
+	alimb |= (mpi_limb_t)*p-- << 32 ;
+	alimb |= (mpi_limb_t)*p-- << 40 ;
+	alimb |= (mpi_limb_t)*p-- << 48 ;
+	alimb |= (mpi_limb_t)*p-- << 56 ;
+      #else
+	#error please implement for this limb size.
+      #endif
+	a->d[i++] = alimb;
+    }
+    if( p >= buffer ) {
+      #if BYTES_PER_MPI_LIMB == 4
+	alimb  = *p--	    ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- <<  8 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 16 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 24 ;
+      #elif BYTES_PER_MPI_LIMB == 8
+	alimb  = (mpi_limb_t)*p-- ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- <<	8 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 16 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 24 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 32 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 40 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 48 ;
+	if( p >= buffer ) alimb |= (mpi_limb_t)*p-- << 56 ;
+      #else
+	#error please implement for this limb size.
+      #endif
+	a->d[i++] = alimb;
+    }
+    a->nlimbs = i;
+    assert( i == nlimbs );
+}
+
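
do_encode_md above produces the EMSA-PKCS1-v1_5 frame 00 01 FF..FF 00 ASN
MD, where ASN is the fixed DER prefix identifying SHA-1 (OID 1.3.14.3.2.26).
A standalone demo that builds and dumps the same frame for a 128-byte
(1024-bit) modulus and a dummy digest:

#include <stdio.h>
#include <string.h>

static const unsigned char asn1_sha1[15] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e,
	0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14
};

int main(void)
{
	unsigned char digest[20], frame[128];
	int n = 0, i;
	int pad = sizeof(frame) - sizeof(digest) - sizeof(asn1_sha1) - 3;

	memset(digest, 0xab, sizeof(digest));	/* dummy SHA-1 value */

	frame[n++] = 0x00;
	frame[n++] = 0x01;			/* block type 1 */
	memset(frame + n, 0xff, pad); n += pad;	/* FF padding */
	frame[n++] = 0x00;
	memcpy(frame + n, asn1_sha1, sizeof(asn1_sha1)); n += sizeof(asn1_sha1);
	memcpy(frame + n, digest, sizeof(digest)); n += sizeof(digest);

	for (i = 0; i < n; i++)			/* n == 128 here */
		printf("%02x%c", frame[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}
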
diff -Nru a/security/rsa/gnupg_mpi_mpih-cmp.c b/security/rsa/gnupg_mpi_mpih-cmp.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpih-cmp.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,58 @@
+/* mpih-cmp.c  -  MPI helper functions
+ *	Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ *	Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+
+/****************
+ * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE.
+ * There are no restrictions on the relative sizes of
+ * the two arguments.
+ * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2.
+ */
+int
+mpihelp_cmp( mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size )
+{
+    mpi_size_t i;
+    mpi_limb_t op1_word, op2_word;
+
+    for( i = size - 1; i >= 0 ; i--) {
+	op1_word = op1_ptr[i];
+	op2_word = op2_ptr[i];
+	if( op1_word != op2_word )
+	    goto diff;
+    }
+    return 0;
+
+  diff:
+    /* This can *not* be simplified to
+     *	 op1_word - op2_word
+     * since that expression might give signed overflow.  */
+    return (op1_word > op2_word) ? 1 : -1;
+}
+
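
The comment in mpihelp_cmp is worth taking literally: the ordering of two
unsigned limbs cannot be recovered from the sign of their difference,
because the difference can exceed the signed range.  A tiny standalone
demonstration of the hazard (two's-complement conversion assumed):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t x = 0xffffffff, y = 0;

	/* x > y, yet the truncated difference looks negative. */
	printf("x > y            : %d\n", x > y);		  /* 1 */
	printf("(int32_t)(x - y) : %d\n", (int32_t)(x - y) > 0);  /* 0 */
	return 0;
}
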
diff -Nru a/security/rsa/gnupg_mpi_mpih-div.c b/security/rsa/gnupg_mpi_mpih-div.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpih-div.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,534 @@
+/* mpihelp-div.c  -  MPI helper functions
+ *	Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ *	Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+#ifndef UMUL_TIME
+  #define UMUL_TIME 1
+#endif
+#ifndef UDIV_TIME
+  #define UDIV_TIME UMUL_TIME
+#endif
+
+/* FIXME: We should be using invert_limb (or invert_normalized_limb)
+ * here (not udiv_qrnnd).
+ */
+
+mpi_limb_t
+mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+				      mpi_limb_t divisor_limb)
+{
+    mpi_size_t i;
+    mpi_limb_t n1, n0, r;
+    int dummy;
+
+    /* Botch: Should this be handled at all?  Rely on callers?	*/
+    if( !dividend_size )
+	return 0;
+
+    /* If multiplication is much faster than division, and the
+     * dividend is large, pre-invert the divisor, and use
+     * only multiplications in the inner loop.
+     *
+     * This test should be read:
+     *	 Does it ever help to use udiv_qrnnd_preinv?
+     *	   && Does what we save compensate for the inversion overhead?
+     */
+    if( UDIV_TIME > (2 * UMUL_TIME + 6)
+	&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) {
+	int normalization_steps;
+
+	count_leading_zeros( normalization_steps, divisor_limb );
+	if( normalization_steps ) {
+	    mpi_limb_t divisor_limb_inverted;
+
+	    divisor_limb <<= normalization_steps;
+
+	    /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+	     * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+	     * most significant bit (with weight 2**N) implicit.
+	     *
+	     * Special case for DIVISOR_LIMB == 100...000.
+	     */
+	    if( !(divisor_limb << 1) )
+		divisor_limb_inverted = ~(mpi_limb_t)0;
+	    else
+		udiv_qrnnd(divisor_limb_inverted, dummy,
+			   -divisor_limb, 0, divisor_limb);
+
+	    n1 = dividend_ptr[dividend_size - 1];
+	    r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+	    /* Possible optimization:
+	     * if (r == 0
+	     * && divisor_limb > ((n1 << normalization_steps)
+	     *		       | (dividend_ptr[dividend_size - 2] >> ...)))
+	     * ...one division less...
+	     */
+	    for( i = dividend_size - 2; i >= 0; i--) {
+		n0 = dividend_ptr[i];
+		UDIV_QRNND_PREINV(dummy, r, r,
+				   ((n1 << normalization_steps)
+			  | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+			  divisor_limb, divisor_limb_inverted);
+		n1 = n0;
+	    }
+	    UDIV_QRNND_PREINV(dummy, r, r,
+			      n1 << normalization_steps,
+			      divisor_limb, divisor_limb_inverted);
+	    return r >> normalization_steps;
+	}
+	else {
+	    mpi_limb_t divisor_limb_inverted;
+
+	    /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+	     * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+	     * most significant bit (with weight 2**N) implicit.
+	     *
+	     * Special case for DIVISOR_LIMB == 100...000.
+	     */
+	    if( !(divisor_limb << 1) )
+		divisor_limb_inverted = ~(mpi_limb_t)0;
+	    else
+		udiv_qrnnd(divisor_limb_inverted, dummy,
+			    -divisor_limb, 0, divisor_limb);
+
+	    i = dividend_size - 1;
+	    r = dividend_ptr[i];
+
+	    if( r >= divisor_limb )
+		r = 0;
+	    else
+		i--;
+
+	    for( ; i >= 0; i--) {
+		n0 = dividend_ptr[i];
+		UDIV_QRNND_PREINV(dummy, r, r,
+				  n0, divisor_limb, divisor_limb_inverted);
+	    }
+	    return r;
+	}
+    }
+    else {
+	if( UDIV_NEEDS_NORMALIZATION ) {
+	    int normalization_steps;
+
+	    count_leading_zeros(normalization_steps, divisor_limb);
+	    if( normalization_steps ) {
+		divisor_limb <<= normalization_steps;
+
+		n1 = dividend_ptr[dividend_size - 1];
+		r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+		/* Possible optimization:
+		 * if (r == 0
+		 * && divisor_limb > ((n1 << normalization_steps)
+		 *		   | (dividend_ptr[dividend_size - 2] >> ...)))
+		 * ...one division less...
+		 */
+		for(i = dividend_size - 2; i >= 0; i--) {
+		    n0 = dividend_ptr[i];
+		    udiv_qrnnd (dummy, r, r,
+				((n1 << normalization_steps)
+			 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+			 divisor_limb);
+		    n1 = n0;
+		}
+		udiv_qrnnd (dummy, r, r,
+			    n1 << normalization_steps,
+			    divisor_limb);
+		return r >> normalization_steps;
+	    }
+	}
+	/* No normalization needed, either because udiv_qrnnd doesn't require
+	 * it, or because DIVISOR_LIMB is already normalized.  */
+	i = dividend_size - 1;
+	r = dividend_ptr[i];
+
+	if(r >= divisor_limb)
+	    r = 0;
+	else
+	    i--;
+
+	for(; i >= 0; i--) {
+	    n0 = dividend_ptr[i];
+	    udiv_qrnnd (dummy, r, r, n0, divisor_limb);
+	}
+	return r;
+    }
+}
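+
+/* A worked instance of the heuristic above, with illustrative (not
+ * measured) costs UDIV_TIME = 20 and UMUL_TIME = 2: the guard reads
+ * 20 > (2*2 + 6), i.e. a hardware divide is slower than the
+ * pre-inverted path, and (20 - 10) * dividend_size > 20 holds from
+ * dividend_size >= 3 on -- so the one-off udiv_qrnnd() spent on
+ * computing the inverse pays off for dividends of 3 or more limbs.
+ */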
+
+/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
+ * the NSIZE-DSIZE least significant quotient limbs at QP
+ * and the DSIZE long remainder at NP.	If QEXTRA_LIMBS is
+ * non-zero, generate that many fraction bits and append them after the
+ * other quotient limbs.
+ * Return the most significant limb of the quotient, this is always 0 or 1.
+ *
+ * Preconditions:
+ * 0. NSIZE >= DSIZE.
+ * 1. The most significant bit of the divisor must be set.
+ * 2. QP must either not overlap with the input operands at all, or
+ *    QP + DSIZE >= NP must hold true.	(This means that it's
+ *    possible to put the quotient in the high part of NUM, right after the
+ *    remainder in NUM.)
+ * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
+ */
+
+mpi_limb_t
+mpihelp_divrem( mpi_ptr_t qp, mpi_size_t qextra_limbs,
+		mpi_ptr_t np, mpi_size_t nsize,
+		mpi_ptr_t dp, mpi_size_t dsize)
+{
+    mpi_limb_t most_significant_q_limb = 0;
+
+    switch(dsize) {
+      case 0:
+	/* We are asked to divide by zero, so go ahead and do it!  (To make
+	   the compiler not remove this statement, return the value.)  */
+	return 1 / dsize;
+
+      case 1:
+	{
+	    mpi_size_t i;
+	    mpi_limb_t n1;
+	    mpi_limb_t d;
+
+	    d = dp[0];
+	    n1 = np[nsize - 1];
+
+	    if( n1 >= d ) {
+		n1 -= d;
+		most_significant_q_limb = 1;
+	    }
+
+	    qp += qextra_limbs;
+	    for( i = nsize - 2; i >= 0; i--)
+		udiv_qrnnd( qp[i], n1, n1, np[i], d );
+	    qp -= qextra_limbs;
+
+	    for( i = qextra_limbs - 1; i >= 0; i-- )
+		udiv_qrnnd (qp[i], n1, n1, 0, d);
+
+	    np[0] = n1;
+	}
+	break;
+
+      case 2:
+	{
+	    mpi_size_t i;
+	    mpi_limb_t n1, n0, n2;
+	    mpi_limb_t d1, d0;
+
+	    np += nsize - 2;
+	    d1 = dp[1];
+	    d0 = dp[0];
+	    n1 = np[1];
+	    n0 = np[0];
+
+	    if( n1 >= d1 && (n1 > d1 || n0 >= d0) ) {
+		sub_ddmmss (n1, n0, n1, n0, d1, d0);
+		most_significant_q_limb = 1;
+	    }
+
+	    for( i = qextra_limbs + nsize - 2 - 1; i >= 0; i-- ) {
+		mpi_limb_t q;
+		mpi_limb_t r;
+
+		if( i >= qextra_limbs )
+		    np--;
+		else
+		    np[0] = 0;
+
+		if( n1 == d1 ) {
+		    /* Q should be either 111..111 or 111..110.  Need special
+		     * treatment of this rare case as normal division would
+		     * give overflow.  */
+		    q = ~(mpi_limb_t)0;
+
+		    r = n0 + d1;
+		    if( r < d1 ) {   /* Carry in the addition? */
+			add_ssaaaa( n1, n0, r - d0, np[0], 0, d0 );
+			qp[i] = q;
+			continue;
+		    }
+		    n1 = d0 - (d0 != 0?1:0);
+		    n0 = -d0;
+		}
+		else {
+		    udiv_qrnnd (q, r, n1, n0, d1);
+		    umul_ppmm (n1, n0, d0, q);
+		}
+
+		n2 = np[0];
+	      q_test:
+		if( n1 > r || (n1 == r && n0 > n2) ) {
+		    /* The estimated Q was too large.  */
+		    q--;
+		    sub_ddmmss (n1, n0, n1, n0, 0, d0);
+		    r += d1;
+		    if( r >= d1 )    /* If not carry, test Q again.  */
+			goto q_test;
+		}
+
+		qp[i] = q;
+		sub_ddmmss (n1, n0, r, n2, n1, n0);
+	    }
+	    np[1] = n1;
+	    np[0] = n0;
+	}
+	break;
+
+      default:
+	{
+	    mpi_size_t i;
+	    mpi_limb_t dX, d1, n0;
+
+	    np += nsize - dsize;
+	    dX = dp[dsize - 1];
+	    d1 = dp[dsize - 2];
+	    n0 = np[dsize - 1];
+
+	    if( n0 >= dX ) {
+		if(n0 > dX || mpihelp_cmp(np, dp, dsize - 1) >= 0 ) {
+		    mpihelp_sub_n(np, np, dp, dsize);
+		    n0 = np[dsize - 1];
+		    most_significant_q_limb = 1;
+		}
+	    }
+
+	    for( i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
+		mpi_limb_t q;
+		mpi_limb_t n1, n2;
+		mpi_limb_t cy_limb;
+
+		if( i >= qextra_limbs ) {
+		    np--;
+		    n2 = np[dsize];
+		}
+		else {
+		    n2 = np[dsize - 1];
+		    MPN_COPY_DECR (np + 1, np, dsize - 1);
+		    np[0] = 0;
+		}
+
+		if( n0 == dX ) {
+		    /* This might over-estimate q, but it's probably not worth
+		     * the extra code here to find out.  */
+		    q = ~(mpi_limb_t)0;
+		}
+		else {
+		    mpi_limb_t r;
+
+		    udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
+		    umul_ppmm(n1, n0, d1, q);
+
+		    while( n1 > r || (n1 == r && n0 > np[dsize - 2])) {
+			q--;
+			r += dX;
+			if( r < dX ) /* I.e. "carry in previous addition?" */
+			    break;
+			n1 -= n0 < d1;
+			n0 -= d1;
+		    }
+		}
+
+		/* Possible optimization: We already have (q * n0) and (1 * n1)
+		 * after the calculation of q.	Taking advantage of that, we
+		 * could make this loop make two iterations less.  */
+		cy_limb = mpihelp_submul_1(np, dp, dsize, q);
+
+		if( n2 != cy_limb ) {
+		    mpihelp_add_n(np, np, dp, dsize);
+		    q--;
+		}
+
+		qp[i] = q;
+		n0 = np[dsize - 1];
+	    }
+	}
+    }
+
+    return most_significant_q_limb;
+}
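+
+/* A minimal usage sketch for mpihelp_divrem(); hypothetical names,
+ * fenced off with #if 0 so it is not built.  Per precondition 1 the
+ * divisor must be pre-shifted so its most significant bit is set.
+ */
+#if 0
+static void divrem_example(void)
+{
+    mpi_limb_t n[4] = { 0, 0, 0, (mpi_limb_t)1 << (BITS_PER_MPI_LIMB - 1) };
+    mpi_limb_t d[2] = { 1, (mpi_limb_t)1 << (BITS_PER_MPI_LIMB - 1) };
+    mpi_limb_t q[2];	/* nsize - dsize = 2 quotient limbs */
+    mpi_limb_t q_msl;
+
+    /* Afterwards q holds the low quotient limbs, q_msl the 0-or-1
+     * top quotient limb, and n[0..1] the 2-limb remainder. */
+    q_msl = mpihelp_divrem( q, 0, n, 4, d, 2 );
+}
+#endif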
+
+
+/****************
+ * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
+ * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
+ * Return the single-limb remainder.
+ * There are no constraints on the value of the divisor.
+ *
+ * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
+ */
+
+mpi_limb_t
+mpihelp_divmod_1( mpi_ptr_t quot_ptr,
+		  mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+		  mpi_limb_t divisor_limb)
+{
+    mpi_size_t i;
+    mpi_limb_t n1, n0, r;
+    int dummy;
+
+    if( !dividend_size )
+	return 0;
+
+    /* If multiplication is much faster than division, and the
+     * dividend is large, pre-invert the divisor, and use
+     * only multiplications in the inner loop.
+     *
+     * This test should be read:
+     * Does it ever help to use udiv_qrnnd_preinv?
+     * && Does what we save compensate for the inversion overhead?
+     */
+    if( UDIV_TIME > (2 * UMUL_TIME + 6)
+	&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) {
+	int normalization_steps;
+
+	count_leading_zeros( normalization_steps, divisor_limb );
+	if( normalization_steps ) {
+	    mpi_limb_t divisor_limb_inverted;
+
+	    divisor_limb <<= normalization_steps;
+
+	    /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+	     * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+	     * most significant bit (with weight 2**N) implicit.
+	     */
+	    /* Special case for DIVISOR_LIMB == 100...000.  */
+	    if( !(divisor_limb << 1) )
+		divisor_limb_inverted = ~(mpi_limb_t)0;
+	    else
+		udiv_qrnnd(divisor_limb_inverted, dummy,
+			   -divisor_limb, 0, divisor_limb);
+
+	    n1 = dividend_ptr[dividend_size - 1];
+	    r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+	    /* Possible optimization:
+	     * if (r == 0
+	     * && divisor_limb > ((n1 << normalization_steps)
+	     *		       | (dividend_ptr[dividend_size - 2] >> ...)))
+	     * ...one division less...
+	     */
+	    for( i = dividend_size - 2; i >= 0; i--) {
+		n0 = dividend_ptr[i];
+		UDIV_QRNND_PREINV( quot_ptr[i + 1], r, r,
+				   ((n1 << normalization_steps)
+			 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+			      divisor_limb, divisor_limb_inverted);
+		n1 = n0;
+	    }
+	    UDIV_QRNND_PREINV( quot_ptr[0], r, r,
+			       n1 << normalization_steps,
+			       divisor_limb, divisor_limb_inverted);
+	    return r >> normalization_steps;
+	}
+	else {
+	    mpi_limb_t divisor_limb_inverted;
+
+	    /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+	     * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+	     * most significant bit (with weight 2**N) implicit.
+	     */
+	    /* Special case for DIVISOR_LIMB == 100...000.  */
+	    if( !(divisor_limb << 1) )
+		divisor_limb_inverted = ~(mpi_limb_t) 0;
+	    else
+		udiv_qrnnd(divisor_limb_inverted, dummy,
+			   -divisor_limb, 0, divisor_limb);
+
+	    i = dividend_size - 1;
+	    r = dividend_ptr[i];
+
+	    if( r >= divisor_limb )
+		r = 0;
+	    else
+		quot_ptr[i--] = 0;
+
+	    for( ; i >= 0; i-- ) {
+		n0 = dividend_ptr[i];
+		UDIV_QRNND_PREINV( quot_ptr[i], r, r,
+				   n0, divisor_limb, divisor_limb_inverted);
+	    }
+	    return r;
+	}
+    }
+    else {
+	if(UDIV_NEEDS_NORMALIZATION) {
+	    int normalization_steps;
+
+	    count_leading_zeros (normalization_steps, divisor_limb);
+	    if( normalization_steps ) {
+		divisor_limb <<= normalization_steps;
+
+		n1 = dividend_ptr[dividend_size - 1];
+		r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+		/* Possible optimization:
+		 * if (r == 0
+		 * && divisor_limb > ((n1 << normalization_steps)
+		 *		   | (dividend_ptr[dividend_size - 2] >> ...)))
+		 * ...one division less...
+		 */
+		for( i = dividend_size - 2; i >= 0; i--) {
+		    n0 = dividend_ptr[i];
+		    udiv_qrnnd (quot_ptr[i + 1], r, r,
+			     ((n1 << normalization_steps)
+			 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+				divisor_limb);
+		    n1 = n0;
+		}
+		udiv_qrnnd (quot_ptr[0], r, r,
+			    n1 << normalization_steps,
+			    divisor_limb);
+		return r >> normalization_steps;
+	    }
+	}
+	/* No normalization needed, either because udiv_qrnnd doesn't require
+	 * it, or because DIVISOR_LIMB is already normalized.  */
+	i = dividend_size - 1;
+	r = dividend_ptr[i];
+
+	if(r >= divisor_limb)
+	    r = 0;
+	else
+	    quot_ptr[i--] = 0;
+
+	for(; i >= 0; i--) {
+	    n0 = dividend_ptr[i];
+	    udiv_qrnnd( quot_ptr[i], r, r, n0, divisor_limb );
+	}
+	return r;
+    }
+}
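+
+/* Whichever branch runs above, the outputs satisfy the usual division
+ * invariant: dividend = quotient * divisor_limb + remainder, with
+ * 0 <= remainder < divisor_limb.  The normalized branches shift the
+ * remainder back down (r >> normalization_steps) before returning, so
+ * the invariant holds for the original, unshifted divisor.
+ */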
+
+
diff -Nru a/security/rsa/gnupg_mpi_mpih-mul.c b/security/rsa/gnupg_mpi_mpih-mul.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpih-mul.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,525 @@
+/* mpihelp-mul.c  -  MPI helper functions
+ * Copyright (C) 1994, 1996, 1998, 1999,
+ *               2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ *	 Actually it's the same code with only minor changes in the
+ *	 way the data is stored; this is to support the abstraction
+ *	 of an optional secure memory allocation which may be used
+ *	 to avoid revealing of sensitive data due to paging etc.
+ *	 The GNU MP Library itself is published under the LGPL;
+ *	 however I decided to publish this code under the plain GPL.
+ */
+
+#include <linux/string.h>
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_longlong.h"
+
+
+
+#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
+    do {						\
+	if( (size) < KARATSUBA_THRESHOLD )		\
+	    mul_n_basecase (prodp, up, vp, size);	\
+	else						\
+	    mul_n (prodp, up, vp, size, tspace);	\
+    } while (0)
+
+#define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
+    do {					    \
+	if ((size) < KARATSUBA_THRESHOLD)	    \
+	    mpih_sqr_n_basecase (prodp, up, size);	 \
+	else					    \
+	    mpih_sqr_n (prodp, up, size, tspace);	 \
+    } while (0)
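+
+/* These two macros are the dispatch point between the O(n^2)
+ * schoolbook base cases and the O(n^1.585) Karatsuba recursion:
+ * KARATSUBA_THRESHOLD (from the mpi headers) is the operand size, in
+ * limbs, below which recursing costs more than it saves.
+ */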
+
+
+
+
+/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
+ * both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
+ * always stored.  Return the most significant limb.
+ *
+ * Argument constraints:
+ * 1. PRODP != UP and PRODP != VP, i.e. the destination
+ *    must be distinct from the multiplier and the multiplicand.
+ *
+ *
+ * Handle simple cases with traditional multiplication.
+ *
+ * This is the most critical code of multiplication.  All multiplies rely
+ * on this, both small and huge.  Small ones arrive here immediately.  Huge
+ * ones arrive here as this is the base case for Karatsuba's recursive
+ * algorithm below.
+ */
+
+static mpi_limb_t
+mul_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up,
+				 mpi_ptr_t vp, mpi_size_t size)
+{
+    mpi_size_t i;
+    mpi_limb_t cy;
+    mpi_limb_t v_limb;
+
+    /* Multiply by the first limb in V separately, as the result can be
+     * stored (not added) to PROD.  We also avoid a loop for zeroing.  */
+    v_limb = vp[0];
+    if( v_limb <= 1 ) {
+	if( v_limb == 1 )
+	    MPN_COPY( prodp, up, size );
+	else
+	    MPN_ZERO( prodp, size );
+	cy = 0;
+    }
+    else
+	cy = mpihelp_mul_1( prodp, up, size, v_limb );
+
+    prodp[size] = cy;
+    prodp++;
+
+    /* For each iteration in the outer loop, multiply one limb from
+     * U with one limb from V, and add it to PROD.  */
+    for( i = 1; i < size; i++ ) {
+	v_limb = vp[i];
+	if( v_limb <= 1 ) {
+	    cy = 0;
+	    if( v_limb == 1 )
+	       cy = mpihelp_add_n(prodp, prodp, up, size);
+	}
+	else
+	    cy = mpihelp_addmul_1(prodp, up, size, v_limb);
+
+	prodp[size] = cy;
+	prodp++;
+    }
+
+    return cy;
+}
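+
+/* A tiny trace of the basecase loop with illustrative base-10 "limbs":
+ * multiplying U = {3,2} (i.e. 23) by V = {4,1} (i.e. 14), the
+ * v_limb = 4 pass stores 23*4 = 92 as {2,9}; the v_limb = 1 pass adds
+ * U one limb higher: 9+3 = 12 -> digit 2 carry 1, then 0+2+1 = 3,
+ * giving {2,2,3}, i.e. 322 = 23*14.
+ */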
+
+
+static void
+mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
+			mpi_size_t size, mpi_ptr_t tspace )
+{
+    if( size & 1 ) {
+      /* The size is odd, and the code below doesn't handle that.
+       * Multiply the least significant (size - 1) limbs with a recursive
+       * call, and handle the most significant limb of S1 and S2
+       * separately.
+       * A slightly faster way to do this would be to make the Karatsuba
+       * code below behave as if the size were even, and let it check for
+       * odd size in the end.  I.e., in essence move this code to the end.
+       * Doing so would save us a recursive call, and potentially make the
+       * stack grow a lot less.
+       */
+      mpi_size_t esize = size - 1;	 /* even size */
+      mpi_limb_t cy_limb;
+
+      MPN_MUL_N_RECURSE( prodp, up, vp, esize, tspace );
+      cy_limb = mpihelp_addmul_1( prodp + esize, up, esize, vp[esize] );
+      prodp[esize + esize] = cy_limb;
+      cy_limb = mpihelp_addmul_1( prodp + esize, vp, size, up[esize] );
+      prodp[esize + size] = cy_limb;
+    }
+    else {
+	/* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
+	 *
+	 * Split U in two pieces, U1 and U0, such that
+	 * U = U0 + U1*(B**n),
+	 * and V in V1 and V0, such that
+	 * V = V0 + V1*(B**n).
+	 *
+	 * UV is then computed recursively using the identity
+	 *
+	 *	  2n   n	  n			n
+	 * UV = (B  + B )U V  +  B (U -U )(V -V )  +  (B + 1)U V
+	 *		  1 1	     1	0   0  1	      0 0
+	 *
+	 * Where B = 2**BITS_PER_MP_LIMB.
+	 */
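+	/* Expanding the three half-size products verifies the identity:
+	 * the B**n coefficient is U1*V1 + (U1*V0 - U1*V1 - U0*V0 + U0*V1)
+	 * + U0*V0 = U1*V0 + U0*V1, while B**2n keeps U1*V1 and the unit
+	 * term keeps U0*V0 -- exactly the schoolbook result, but with
+	 * three multiplies instead of four.
+	 */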
+	mpi_size_t hsize = size >> 1;
+	mpi_limb_t cy;
+	int negflg;
+
+	/* Product H.	   ________________  ________________
+	 *		  |_____U1 x V1____||____U0 x V0_____|
+	 * Put result in upper part of PROD and pass low part of TSPACE
+	 * as new TSPACE.
+	 */
+	MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace);
+
+	/* Product M.	   ________________
+	 *		  |_(U1-U0)(V0-V1)_|
+	 */
+	if( mpihelp_cmp(up + hsize, up, hsize) >= 0 ) {
+	    mpihelp_sub_n(prodp, up + hsize, up, hsize);
+	    negflg = 0;
+	}
+	else {
+	    mpihelp_sub_n(prodp, up, up + hsize, hsize);
+	    negflg = 1;
+	}
+	if( mpihelp_cmp(vp + hsize, vp, hsize) >= 0 ) {
+	    mpihelp_sub_n(prodp + hsize, vp + hsize, vp, hsize);
+	    negflg ^= 1;
+	}
+	else {
+	    mpihelp_sub_n(prodp + hsize, vp, vp + hsize, hsize);
+	    /* No change of NEGFLG.  */
+	}
+	/* Read temporary operands from low part of PROD.
+	 * Put result in low part of TSPACE using upper part of TSPACE
+	 * as new TSPACE.
+	 */
+	MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size);
+
+	/* Add/copy product H. */
+	MPN_COPY (prodp + hsize, prodp + size, hsize);
+	cy = mpihelp_add_n( prodp + size, prodp + size,
+			    prodp + size + hsize, hsize);
+
+	/* Add product M (if NEGFLG M is a negative number) */
+	if(negflg)
+	    cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size);
+	else
+	    cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
+
+	/* Product L.	   ________________  ________________
+	 *		  |________________||____U0 x V0_____|
+	 * Read temporary operands from low part of PROD.
+	 * Put result in low part of TSPACE using upper part of TSPACE
+	 * as new TSPACE.
+	 */
+	MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);
+
+	/* Add/copy Product L (twice) */
+
+	cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
+	if( cy )
+	  mpihelp_add_1(prodp + hsize + size, prodp + hsize + size, hsize, cy);
+
+	MPN_COPY(prodp, tspace, hsize);
+	cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize);
+	if( cy )
+	    mpihelp_add_1(prodp + size, prodp + size, size, 1);
+    }
+}
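+
+/* TSPACE sizing: each mul_n() level consumes SIZE limbs of scratch and
+ * recurses on half-size operands with `tspace + size', so a top-level
+ * buffer of 2*SIZE limbs (size + size/2 + size/4 + ... < 2*size)
+ * covers the whole recursion; that is exactly what mpihelp_mul_n()
+ * below allocates.
+ */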
+
+
+void
+mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size )
+{
+    mpi_size_t i;
+    mpi_limb_t cy_limb;
+    mpi_limb_t v_limb;
+
+    /* Multiply by the first limb of U separately (this is a squaring,
+     * so V is U itself), as the result can be stored (not added) to
+     * PROD.  We also avoid a loop for zeroing.  */
+    v_limb = up[0];
+    if( v_limb <= 1 ) {
+	if( v_limb == 1 )
+	    MPN_COPY( prodp, up, size );
+	else
+	    MPN_ZERO(prodp, size);
+	cy_limb = 0;
+    }
+    else
+	cy_limb = mpihelp_mul_1( prodp, up, size, v_limb );
+
+    prodp[size] = cy_limb;
+    prodp++;
+
+    /* For each iteration in the outer loop, multiply one limb of U
+     * with the whole of U, and add it to PROD.  */
+    for( i=1; i < size; i++) {
+	v_limb = up[i];
+	if( v_limb <= 1 ) {
+	    cy_limb = 0;
+	    if( v_limb == 1 )
+		cy_limb = mpihelp_add_n(prodp, prodp, up, size);
+	}
+	else
+	    cy_limb = mpihelp_addmul_1(prodp, up, size, v_limb);
+
+	prodp[size] = cy_limb;
+	prodp++;
+    }
+}
+
+
+void
+mpih_sqr_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
+{
+    if( size & 1 ) {
+	/* The size is odd, and the code below doesn't handle that.
+	 * Multiply the least significant (size - 1) limbs with a recursive
+	 * call, and handle the most significant limb of S1 and S2
+	 * separately.
+	 * A slightly faster way to do this would be to make the Karatsuba
+	 * code below behave as if the size were even, and let it check for
+	 * odd size in the end.  I.e., in essence move this code to the end.
+	 * Doing so would save us a recursive call, and potentially make the
+	 * stack grow a lot less.
+	 */
+	mpi_size_t esize = size - 1;	   /* even size */
+	mpi_limb_t cy_limb;
+
+	MPN_SQR_N_RECURSE( prodp, up, esize, tspace );
+	cy_limb = mpihelp_addmul_1( prodp + esize, up, esize, up[esize] );
+	prodp[esize + esize] = cy_limb;
+	cy_limb = mpihelp_addmul_1( prodp + esize, up, size, up[esize] );
+
+	prodp[esize + size] = cy_limb;
+    }
+    else {
+	mpi_size_t hsize = size >> 1;
+	mpi_limb_t cy;
+
+	/* Product H.	   ________________  ________________
+	 *		  |_____U1 x U1____||____U0 x U0_____|
+	 * Put result in upper part of PROD and pass low part of TSPACE
+	 * as new TSPACE.
+	 */
+	MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);
+
+	/* Product M.	   ________________
+	 *		  |_(U1-U0)(U0-U1)_|
+	 */
+	if( mpihelp_cmp( up + hsize, up, hsize) >= 0 )
+	    mpihelp_sub_n( prodp, up + hsize, up, hsize);
+	else
+	    mpihelp_sub_n (prodp, up, up + hsize, hsize);
+
+	/* Read temporary operands from low part of PROD.
+	 * Put result in low part of TSPACE using upper part of TSPACE
+	 * as new TSPACE.  */
+	MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);
+
+	/* Add/copy product H  */
+	MPN_COPY(prodp + hsize, prodp + size, hsize);
+	cy = mpihelp_add_n(prodp + size, prodp + size,
+			   prodp + size + hsize, hsize);
+
+	/* Subtract product M: when squaring, M = (U1-U0)*(U0-U1)
+	 * = -(U1-U0)**2 is never positive, so there is no NEGFLG case. */
+	cy -= mpihelp_sub_n (prodp + hsize, prodp + hsize, tspace, size);
+
+	/* Product L.	   ________________  ________________
+	 *		  |________________||____U0 x U0_____|
+	 * Read temporary operands from low part of PROD.
+	 * Put result in low part of TSPACE using upper part of TSPACE
+	 * as new TSPACE.  */
+	MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);
+
+	/* Add/copy Product L (twice).	*/
+	cy += mpihelp_add_n (prodp + hsize, prodp + hsize, tspace, size);
+	if( cy )
+	    mpihelp_add_1(prodp + hsize + size, prodp + hsize + size,
+							    hsize, cy);
+
+	MPN_COPY(prodp, tspace, hsize);
+	cy = mpihelp_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
+	if( cy )
+	    mpihelp_add_1 (prodp + size, prodp + size, size, 1);
+    }
+}
+
+
+/* This should be made into an inline function in gmp.h.  */
+void
+mpihelp_mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
+{
+    int secure;
+
+    if( up == vp ) {
+	if( size < KARATSUBA_THRESHOLD )
+	    mpih_sqr_n_basecase( prodp, up, size );
+	else {
+	    mpi_ptr_t tspace;
+	    secure = m_is_secure( up );
+	    tspace = mpi_alloc_limb_space( 2 * size, secure );
+	    mpih_sqr_n( prodp, up, size, tspace );
+	    mpi_free_limb_space( tspace );
+	}
+    }
+    else {
+	if( size < KARATSUBA_THRESHOLD )
+	    mul_n_basecase( prodp, up, vp, size );
+	else {
+	    mpi_ptr_t tspace;
+	    secure = m_is_secure( up ) || m_is_secure( vp );
+	    tspace = mpi_alloc_limb_space( 2 * size, secure );
+	    mul_n (prodp, up, vp, size, tspace);
+	    mpi_free_limb_space( tspace );
+	}
+    }
+}
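+
+/* A minimal usage sketch for mpihelp_mul_n() (hypothetical names, not
+ * built): the destination must provide 2*size limbs and be distinct
+ * from both source operands.
+ */
+#if 0
+static void mul_n_example( mpi_ptr_t ap, mpi_ptr_t bp, mpi_size_t n )
+{
+    mpi_ptr_t prod = mpi_alloc_limb_space( 2 * n, 0 );
+
+    mpihelp_mul_n( prod, ap, bp, n );	/* prod gets the 2n-limb product */
+    mpi_free_limb_space( prod );
+}
+#endif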
+
+
+
+void
+mpihelp_mul_karatsuba_case( mpi_ptr_t prodp,
+			    mpi_ptr_t up, mpi_size_t usize,
+			    mpi_ptr_t vp, mpi_size_t vsize,
+			    struct karatsuba_ctx *ctx )
+{
+    mpi_limb_t cy;
+
+    if( !ctx->tspace || ctx->tspace_size < vsize ) {
+	if( ctx->tspace )
+	    mpi_free_limb_space( ctx->tspace );
+	ctx->tspace = mpi_alloc_limb_space( 2 * vsize,
+				       m_is_secure( up ) || m_is_secure( vp ) );
+	ctx->tspace_size = vsize;
+    }
+
+    MPN_MUL_N_RECURSE( prodp, up, vp, vsize, ctx->tspace );
+
+    prodp += vsize;
+    up += vsize;
+    usize -= vsize;
+    if( usize >= vsize ) {
+	if( !ctx->tp || ctx->tp_size < vsize ) {
+	    if( ctx->tp )
+		mpi_free_limb_space( ctx->tp );
+	    ctx->tp = mpi_alloc_limb_space( 2 * vsize, m_is_secure( up )
+						      || m_is_secure( vp ) );
+	    ctx->tp_size = vsize;
+	}
+
+	do {
+	    MPN_MUL_N_RECURSE( ctx->tp, up, vp, vsize, ctx->tspace );
+	    cy = mpihelp_add_n( prodp, prodp, ctx->tp, vsize );
+	    mpihelp_add_1( prodp + vsize, ctx->tp + vsize, vsize, cy );
+	    prodp += vsize;
+	    up += vsize;
+	    usize -= vsize;
+	} while( usize >= vsize );
+    }
+
+    if( usize ) {
+	if( usize < KARATSUBA_THRESHOLD ) {
+	    mpihelp_mul( ctx->tspace, vp, vsize, up, usize );
+	}
+	else {
+	    if( !ctx->next ) {
+		ctx->next = m_alloc_clear( sizeof *ctx );
+	    }
+	    mpihelp_mul_karatsuba_case( ctx->tspace,
+					vp, vsize,
+					up, usize,
+					ctx->next );
+	}
+
+	cy = mpihelp_add_n( prodp, prodp, ctx->tspace, vsize);
+	mpihelp_add_1( prodp + vsize, ctx->tspace + vsize, usize, cy );
+    }
+}
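+
+/* The routine above handles USIZE > VSIZE by slicing U into
+ * VSIZE-limb chunks and accumulating one square VSIZE x VSIZE
+ * Karatsuba product per chunk; e.g. for usize = 5, vsize = 2 it does
+ * two 2x2 products plus one 2x1 tail multiply, carrying the partial
+ * sums forward with mpihelp_add_n()/mpihelp_add_1().
+ */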
+
+
+void
+mpihelp_release_karatsuba_ctx( struct karatsuba_ctx *ctx )
+{
+    struct karatsuba_ctx *ctx2;
+
+    if( ctx->tp )
+	mpi_free_limb_space( ctx->tp );
+    if( ctx->tspace )
+	mpi_free_limb_space( ctx->tspace );
+    for( ctx=ctx->next; ctx; ctx = ctx2 ) {
+	ctx2 = ctx->next;
+	if( ctx->tp )
+	    mpi_free_limb_space( ctx->tp );
+	if( ctx->tspace )
+	    mpi_free_limb_space( ctx->tspace );
+	m_free( ctx );
+    }
+}
+
+/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
+ * and v (pointed to by VP, with VSIZE limbs), and store the result at
+ * PRODP.  USIZE + VSIZE limbs are always stored, even though the most
+ * significant limb may be zero when the inputs are normalized.
+ * Return the most significant limb of the
+ * result.
+ *
+ * NOTE: The space pointed to by PRODP is overwritten before U and V
+ * are fully read, so overlapping PRODP with either input is an error.
+ *
+ * Argument constraints:
+ * 1. USIZE >= VSIZE.
+ * 2. PRODP != UP and PRODP != VP, i.e. the destination
+ *    must be distinct from the multiplier and the multiplicand.
+ */
+
+mpi_limb_t
+mpihelp_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
+			      mpi_ptr_t vp, mpi_size_t vsize)
+{
+    mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
+    mpi_limb_t cy;
+    struct karatsuba_ctx ctx;
+
+    if( vsize < KARATSUBA_THRESHOLD ) {
+	mpi_size_t i;
+	mpi_limb_t v_limb;
+
+	if( !vsize )
+	    return 0;
+
+	/* Multiply by the first limb in V separately, as the result can be
+	 * stored (not added) to PROD.	We also avoid a loop for zeroing.  */
+	v_limb = vp[0];
+	if( v_limb <= 1 ) {
+	    if( v_limb == 1 )
+		MPN_COPY( prodp, up, usize );
+	    else
+		MPN_ZERO( prodp, usize );
+	    cy = 0;
+	}
+	else
+	    cy = mpihelp_mul_1( prodp, up, usize, v_limb );
+
+	prodp[usize] = cy;
+	prodp++;
+
+	/* For each iteration in the outer loop, multiply one limb from
+	 * U with one limb from V, and add it to PROD.	*/
+	for( i = 1; i < vsize; i++ ) {
+	    v_limb = vp[i];
+	    if( v_limb <= 1 ) {
+		cy = 0;
+		if( v_limb == 1 )
+		   cy = mpihelp_add_n(prodp, prodp, up, usize);
+	    }
+	    else
+		cy = mpihelp_addmul_1(prodp, up, usize, v_limb);
+
+	    prodp[usize] = cy;
+	    prodp++;
+	}
+
+	return cy;
+    }
+
+    memset( &ctx, 0, sizeof ctx );
+    mpihelp_mul_karatsuba_case( prodp, up, usize, vp, vsize, &ctx );
+    mpihelp_release_karatsuba_ctx( &ctx );
+    return *prod_endp;
+}
+
+
diff -Nru a/security/rsa/gnupg_mpi_mpiutil.c b/security/rsa/gnupg_mpi_mpiutil.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_mpiutil.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,468 @@
+/* mpiutil.c  -  Utility functions for MPI
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include <linux/string.h>
+
+#include "gnupg_mpi_mpi.h"
+#include "gnupg_mpi_mpi-internal.h"
+#include "gnupg_mpi_memory.h"
+
+
+int DBG_MEMORY = 0; 
+
+#ifdef M_DEBUG
+  #undef mpi_alloc
+  #undef mpi_alloc_secure
+  #undef mpi_free
+#endif
+
+/****************
+ * Note:  It was a bad idea to use the number of limbs to allocate
+ *	  because on an Alpha the limbs are large but we normally need
+ *	  integers of n bits - so we should change this to bits (or bytes).
+ *
+ *	  But mpi_alloc is used in a lot of places :-)
+ */
+MPI
+#ifdef M_DEBUG
+mpi_debug_alloc( unsigned nlimbs, const char *info )
+#else
+mpi_alloc( unsigned nlimbs )
+#endif
+{
+    MPI a;
+
+    if( DBG_MEMORY )
+	log_debug("mpi_alloc(%u)\n", nlimbs*BITS_PER_MPI_LIMB );
+  #ifdef M_DEBUG
+    a = m_debug_alloc( sizeof *a, info );
+    a->d = nlimbs? mpi_debug_alloc_limb_space( nlimbs, 0, info ) : NULL;
+  #else
+    a = m_alloc( sizeof *a );
+    a->d = nlimbs? mpi_alloc_limb_space( nlimbs, 0 ) : NULL;
+  #endif
+    a->alloced = nlimbs;
+    a->nlimbs = 0;
+    a->sign = 0;
+    a->flags = 0;
+    a->nbits = 0;
+    return a;
+}
+
+void
+mpi_m_check( MPI a )
+{
+    m_check(a);
+    m_check(a->d);
+}
+
+MPI
+#ifdef M_DEBUG
+mpi_debug_alloc_secure( unsigned nlimbs, const char *info )
+#else
+mpi_alloc_secure( unsigned nlimbs )
+#endif
+{
+    MPI a;
+
+    if( DBG_MEMORY )
+	log_debug("mpi_alloc_secure(%u)\n", nlimbs*BITS_PER_MPI_LIMB );
+  #ifdef M_DEBUG
+    a = m_debug_alloc( sizeof *a, info );
+    a->d = nlimbs? mpi_debug_alloc_limb_space( nlimbs, 1, info ) : NULL;
+  #else
+    a = m_alloc( sizeof *a );
+    a->d = nlimbs? mpi_alloc_limb_space( nlimbs, 1 ) : NULL;
+  #endif
+    a->alloced = nlimbs;
+    a->flags = 1;
+    a->nlimbs = 0;
+    a->sign = 0;
+    a->nbits = 0;
+    return a;
+}
+
+
+#if 0
+static void *unused_limbs_5;
+static void *unused_limbs_32;
+static void *unused_limbs_64;
+#endif
+
+mpi_ptr_t
+#ifdef M_DEBUG
+mpi_debug_alloc_limb_space( unsigned nlimbs, int secure, const char *info )
+#else
+mpi_alloc_limb_space( unsigned nlimbs, int secure )
+#endif
+{
+    size_t len = nlimbs * sizeof(mpi_limb_t);
+    mpi_ptr_t p;
+
+    if( DBG_MEMORY )
+	log_debug("mpi_alloc_limb_space(%u)\n", (unsigned)len*8 );
+  #if 0
+    if( !secure ) {
+	if( nlimbs == 5 && unused_limbs_5 ) {  /* DSA 160 bits */
+	    p = unused_limbs_5;
+	    unused_limbs_5 = *p;
+	    return p;
+	}
+	else if( nlimbs == 32 && unused_limbs_32 ) {  /* DSA 1024 bits */
+	    p = unused_limbs_32;
+	    unused_limbs_32 = *p;
+	    return p;
+	}
+	else if( nlimbs == 64 && unused_limbs_64 ) {  /* DSA 2*1024 bits */
+	    p = unused_limbs_64;
+	    unused_limbs_64 = *p;
+	    return p;
+	}
+    }
+  #endif
+
+  #ifdef M_DEBUG
+    p = secure? m_debug_alloc_secure(len, info):m_debug_alloc( len, info );
+  #else
+    p = secure? m_alloc_secure( len ):m_alloc( len );
+  #endif
+
+    return p;
+}
+
+void
+#ifdef M_DEBUG
+mpi_debug_free_limb_space( mpi_ptr_t a, const char *info )
+#else
+mpi_free_limb_space( mpi_ptr_t a )
+#endif
+{
+    if( !a )
+	return;
+    if( DBG_MEMORY )
+	log_debug("mpi_free_limb_space of size %lu\n", (ulong)m_size(a)*8 );
+
+  #if 0
+    if( !m_is_secure(a) ) {
+	size_t nlimbs = m_size(a) / 4 ;
+	void *p = a;
+
+	if( nlimbs == 5 ) {  /* DSA 160 bits */
+	    *a = unused_limbs_5;
+	    unused_limbs_5 = a;
+	    return;
+	}
+	else if( nlimbs == 32 ) {  /* DSA 1024 bits */
+	    *a = unused_limbs_32;
+	    unused_limbs_32 = a;
+	    return;
+	}
+	else if( nlimbs == 64 ) {  /* DSA 2*1024 bits */
+	    *a = unused_limbs_64;
+	    unused_limbs_64 = a;
+	    return;
+	}
+    }
+  #endif
+
+
+    m_free(a);
+}
+
+
+void
+mpi_assign_limb_space( MPI a, mpi_ptr_t ap, unsigned nlimbs )
+{
+    mpi_free_limb_space(a->d);
+    a->d = ap;
+    a->alloced = nlimbs;
+}
+
+
+
+/****************
+ * Resize the limb array of A to NLIMBS.  The additional space is
+ * cleared (set to 0) [done by m_realloc()].
+ */
+void
+#ifdef M_DEBUG
+mpi_debug_resize( MPI a, unsigned nlimbs, const char *info )
+#else
+mpi_resize( MPI a, unsigned nlimbs )
+#endif
+{
+    if( nlimbs <= a->alloced )
+	return; /* no need to do it */
+    /* Note: a->secure is not used - instead the realloc functions
+     * take care of it. Maybe we should drop a->secure completely
+     * and rely on a mpi_is_secure function, which would be
+     * a wrapper around m_is_secure
+     */
+  #ifdef M_DEBUG
+    if( a->d )
+	a->d = m_debug_realloc(a->d, nlimbs * sizeof(mpi_limb_t), info );
+    else
+	a->d = m_debug_alloc_clear( nlimbs * sizeof(mpi_limb_t), info );
+  #else
+    if( a->d )
+	a->d = m_realloc(a->d, nlimbs * sizeof(mpi_limb_t) );
+    else
+	a->d = m_alloc_clear( nlimbs * sizeof(mpi_limb_t) );
+  #endif
+    a->alloced = nlimbs;
+}
+
+void
+mpi_clear( MPI a )
+{
+    a->nlimbs = 0;
+    a->nbits = 0;
+    a->flags = 0;
+}
+
+
+void
+#ifdef M_DEBUG
+mpi_debug_free( MPI a, const char *info )
+#else
+mpi_free( MPI a )
+#endif
+{
+    if( !a )
+	return;
+    if( DBG_MEMORY )
+	log_debug("mpi_free\n" );
+    if( a->flags & 4 )
+	m_free( a->d );
+    else {
+      #ifdef M_DEBUG
+	mpi_debug_free_limb_space(a->d, info);
+      #else
+	mpi_free_limb_space(a->d);
+      #endif
+    }
+    if( a->flags & ~7 )
+	log_bug("invalid flag value in mpi\n");
+    m_free(a);
+}
+
+
+void
+mpi_set_secure( MPI a )
+{
+    mpi_ptr_t ap, bp;
+
+    if( (a->flags & 1) )
+	return;
+    a->flags |= 1;
+    ap = a->d;
+    if( !a->nlimbs ) {
+	assert(!ap);
+	return;
+    }
+  #ifdef M_DEBUG
+    bp = mpi_debug_alloc_limb_space( a->nlimbs, 1, "set_secure" );
+  #else
+    bp = mpi_alloc_limb_space( a->nlimbs, 1 );
+  #endif
+    MPN_COPY( bp, ap, a->nlimbs );
+    a->d = bp;
+  #ifdef M_DEBUG
+    mpi_debug_free_limb_space(ap, "set_secure");
+  #else
+    mpi_free_limb_space(ap);
+  #endif
+}
+
+
+MPI
+mpi_set_opaque( MPI a, void *p, int len )
+{
+    if( !a ) {
+      #ifdef M_DEBUG
+	a = mpi_debug_alloc(0,"alloc_opaque");
+      #else
+	a = mpi_alloc(0);
+      #endif
+    }
+
+    if( a->flags & 4 )
+	m_free( a->d );
+    else {
+      #ifdef M_DEBUG
+	mpi_debug_free_limb_space(a->d, "alloc_opaque");
+      #else
+	mpi_free_limb_space(a->d);
+      #endif
+    }
+
+    a->d = p;
+    a->alloced = 0;
+    a->nlimbs = 0;
+    a->nbits = len;
+    a->flags = 4;
+    return a;
+}
+
+
+void *
+mpi_get_opaque( MPI a, int *len )
+{
+    if( !(a->flags & 4) )
+	log_bug("mpi_get_opaque on normal mpi\n");
+    if( len )
+	*len = a->nbits;
+    return a->d;
+}
+
+
+/****************
+ * Note: This copy function should not interpret the MPI
+ *	 but copy it transparently.
+ */
+MPI
+#ifdef M_DEBUG
+mpi_debug_copy( MPI a, const char *info )
+#else
+mpi_copy( MPI a )
+#endif
+{
+    int i;
+    MPI b;
+
+    if( a && (a->flags & 4) ) {
+	void *p = m_is_secure(a->d)? m_alloc_secure( a->nbits )
+				   : m_alloc( a->nbits );
+	memcpy( p, a->d, a->nbits );
+	b = mpi_set_opaque( NULL, p, a->nbits );
+    }
+    else if( a ) {
+      #ifdef M_DEBUG
+	b = mpi_is_secure(a)? mpi_debug_alloc_secure( a->nlimbs, info )
+			    : mpi_debug_alloc( a->nlimbs, info );
+      #else
+	b = mpi_is_secure(a)? mpi_alloc_secure( a->nlimbs )
+			    : mpi_alloc( a->nlimbs );
+      #endif
+	b->nlimbs = a->nlimbs;
+	b->sign = a->sign;
+	b->flags  = a->flags;
+	b->nbits = a->nbits;
+	for(i=0; i < b->nlimbs; i++ )
+	    b->d[i] = a->d[i];
+    }
+    else
+	b = NULL;
+    return b;
+}
+
+
+/****************
+ * This function allocates an MPI which is optimized to hold
+ * a value as large as the one given in the argument and allocates it
+ * with the same flags as A.
+ */
+MPI
+#ifdef M_DEBUG
+mpi_debug_alloc_like( MPI a, const char *info )
+#else
+mpi_alloc_like( MPI a )
+#endif
+{
+    MPI b;
+
+    if( a && (a->flags & 4) ) {
+	void *p = m_is_secure(a->d)? m_alloc_secure( a->nbits )
+				   : m_alloc( a->nbits );
+	memcpy( p, a->d, a->nbits );
+	b = mpi_set_opaque( NULL, p, a->nbits );
+    }
+    else if( a ) {
+      #ifdef M_DEBUG
+	b = mpi_is_secure(a)? mpi_debug_alloc_secure( a->nlimbs, info )
+			    : mpi_debug_alloc( a->nlimbs, info );
+      #else
+	b = mpi_is_secure(a)? mpi_alloc_secure( a->nlimbs )
+			    : mpi_alloc( a->nlimbs );
+      #endif
+	b->nlimbs = 0;
+	b->sign = 0;
+	b->flags = a->flags;
+	b->nbits = 0;
+    }
+    else
+	b = NULL;
+    return b;
+}
+
+
+void
+mpi_set( MPI w, MPI u)
+{
+    mpi_ptr_t wp, up;
+    mpi_size_t usize = u->nlimbs;
+    int usign = u->sign;
+
+    RESIZE_IF_NEEDED(w, usize);
+    wp = w->d;
+    up = u->d;
+    MPN_COPY( wp, up, usize );
+    w->nlimbs = usize;
+    w->nbits = u->nbits;
+    w->flags = u->flags;
+    w->sign = usign;
+}
+
+
+void
+mpi_set_ui( MPI w, unsigned long u)
+{
+    RESIZE_IF_NEEDED(w, 1);
+    w->d[0] = u;
+    w->nlimbs = u? 1:0;
+    w->sign = 0;
+    w->nbits = 0;
+    w->flags = 0;
+}
+
+
+MPI
+mpi_alloc_set_ui( unsigned long u)
+{
+  #ifdef M_DEBUG
+    MPI w = mpi_debug_alloc(1,"alloc_set_ui");
+  #else
+    MPI w = mpi_alloc(1);
+  #endif
+    w->d[0] = u;
+    w->nlimbs = u? 1:0;
+    w->sign = 0;
+    return w;
+}
+
+
+void
+mpi_swap( MPI a, MPI b)
+{
+    struct gcry_mpi tmp;
+
+    tmp = *a; *a = *b; *b = tmp;
+}
+
diff -Nru a/security/rsa/gnupg_mpi_types.h b/security/rsa/gnupg_mpi_types.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/gnupg_mpi_types.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,88 @@
+/* types.h - some common typedefs
+ *	Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GNUPG.
+ *
+ * GNUPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GNUPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#ifndef G10_TYPES_H
+#define G10_TYPES_H
+
+
+/* The AC_CHECK_SIZEOF() in configure fails for some machines.
+ * We provide some fallback values here. */
+#if !SIZEOF_UNSIGNED_SHORT
+  #undef SIZEOF_UNSIGNED_SHORT
+  #define SIZEOF_UNSIGNED_SHORT 2
+#endif
+#if !SIZEOF_UNSIGNED_INT
+  #undef SIZEOF_UNSIGNED_INT
+  #define SIZEOF_UNSIGNED_INT 4
+#endif
+#if !SIZEOF_UNSIGNED_LONG
+  #undef SIZEOF_UNSIGNED_LONG
+  #define SIZEOF_UNSIGNED_LONG 4
+#endif
+
+
+#include <linux/types.h>
+
+
+#ifndef HAVE_BYTE_TYPEDEF
+  #undef byte	    /* maybe there is a macro with this name */
+  #ifndef __riscos__
+    typedef unsigned char byte;
+  #else 
+    /* Norcroft treats char  = unsigned char  as legal assignment
+                   but char* = unsigned char* as illegal assignment
+       and the same applies to the signed variants as well  */
+    typedef char byte;
+  #endif 
+  #define HAVE_BYTE_TYPEDEF
+#endif
+
+#ifndef HAVE_USHORT_TYPEDEF
+  #undef ushort     /* maybe there is a macro with this name */
+  typedef unsigned short ushort;
+  #define HAVE_USHORT_TYPEDEF
+#endif
+
+#ifndef HAVE_ULONG_TYPEDEF
+  #undef ulong	    /* maybe there is a macro with this name */
+  typedef unsigned long ulong;
+  #define HAVE_ULONG_TYPEDEF
+#endif
+
+typedef union {
+    int a;
+    short b;
+    char c[1];
+    long d;
+  #ifdef HAVE_U64_TYPEDEF
+    u64 e;
+  #endif
+    float f;
+    double g;
+} PROPERLY_ALIGNED_TYPE;
+
+typedef struct string_list {
+    struct string_list *next;
+    unsigned int flags;
+    char d[1];
+} *STRLIST;
+
+
+#endif /*G10_TYPES_H*/
diff -Nru a/security/rsa/mpi.c b/security/rsa/mpi.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/mpi.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,1933 @@
+//#include <stdlib.h>
+//#include <string.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "foo.h"
+
+mpi_limb_t _gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, mpi_ptr_t vp, mpi_size_t vsize);
+
+const unsigned char __clz_tab[] = {
+  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+};
+
+//#define gcry_xmalloc(X)	malloc(X)
+//#define gcry_xrealloc(X, Y)	realloc(X, Y)
+//#define gcry_xcalloc(X, Y)	calloc(X, Y)
+//#define gcry_free(X)		free(X)
+
+/* calloc-style helper: zeroed kmalloc.  Check that nmemb*size cannot
+ * overflow, which would otherwise silently under-allocate. */
+static __inline__ void * kcalloc(size_t nmemb, size_t size)
+{
+	void *foo;
+
+	if (size && nmemb > ~(size_t)0 / size)
+		return NULL;
+	foo = kmalloc(nmemb*size, GFP_KERNEL);
+	if (!foo)
+		return NULL;
+	memset(foo, 0x00, nmemb*size);
+	return foo;
+}
+
+/* yeah, it's a hack, sue me...  The old block's length is not tracked,
+ * so when growing, the memcpy() below copies SIZE bytes and can read
+ * past the end of the old allocation. */
+static __inline__ void * krealloc(void *old, size_t size)
+{
+	void *new;
+
+	new = kmalloc(size, GFP_KERNEL);
+	if (old == NULL)
+		return new;
+	if (!new)
+		return NULL;
+	memcpy(new, old, size);
+	kfree(old);	/* realloc semantics: free the old block, don't leak it */
+	return new;
+}
+
+
+#define gcry_xmalloc(X)		kmalloc(X, GFP_KERNEL)
+#define gcry_xrealloc(X, Y)	krealloc(X, Y)
+#define gcry_xcalloc(X, Y)	kcalloc(X, Y)
+#define gcry_free(X)		kfree(X)
+
+
+#define gcry_is_secure(X)	(0)
+#define mpi_alloc(X)		mpi_alloc_secure(X)
+
+#define mpi_free_limb_space(X)	gcry_free(X)
+
+/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
+ * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
+ * If this would yield overflow, DI should be the largest possible number
+ * (i.e., only ones).  For correct operation, the most significant bit of D
+ * has to be set.  Put the quotient in Q and the remainder in R.
+ */
+#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
+    do {                                                            \
+        mpi_limb_t _q, _ql, _r;                                     \
+        mpi_limb_t _xh, _xl;                                        \
+        umul_ppmm (_q, _ql, (nh), (di));                            \
+        _q += (nh);     /* DI is 2**BITS_PER_MPI_LIMB too small */  \
+        umul_ppmm (_xh, _xl, _q, (d));                              \
+        sub_ddmmss (_xh, _r, (nh), (nl), _xh, _xl);                 \
+        if( _xh ) {                                                 \
+            sub_ddmmss (_xh, _r, _xh, _r, 0, (d));                  \
+            _q++;                                                   \
+            if( _xh) {                                              \
+                sub_ddmmss (_xh, _r, _xh, _r, 0, (d));              \
+                _q++;                                               \
+            }                                                       \
+        }                                                           \
+        if( _r >= (d) ) {                                           \
+            _r -= (d);                                              \
+            _q++;                                                   \
+        }                                                           \
+        (r) = _r;                                                   \
+        (q) = _q;                                                   \
+    } while (0)
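+
+/* The macro above estimates q as nh + floor(nh*di / B), where
+ * B = 2**BITS_PER_MPI_LIMB.  Since DI approximates B**2/d - B from
+ * below (see the comment before the macro), the estimate can only
+ * fall short of the true quotient; the sub_ddmmss()/increment chain
+ * and the final `_r >= d' test repair that shortfall.
+ */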
+
+
+/* Define this unconditionally, so it can be used for debugging.  */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+  do {                                                                  \
+    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;                     \
+    __d1 = __ll_highpart (d);                                           \
+    __d0 = __ll_lowpart (d);                                            \
+                                                                        \
+    __r1 = (n1) % __d1;                                                 \
+    __q1 = (n1) / __d1;                                                 \
+    __m = (UWtype) __q1 * __d0;                                         \
+    __r1 = __r1 * __ll_B | __ll_highpart (n0);                          \
+    if (__r1 < __m)                                                     \
+      {                                                                 \
+        __q1--, __r1 += (d);                                            \
+        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+          if (__r1 < __m)                                               \
+            __q1--, __r1 += (d);                                        \
+      }                                                                 \
+    __r1 -= __m;                                                        \
+                                                                        \
+    __r0 = __r1 % __d1;                                                 \
+    __q0 = __r1 / __d1;                                                 \
+    __m = (UWtype) __q0 * __d0;                                         \
+    __r0 = __r0 * __ll_B | __ll_lowpart (n0);                           \
+    if (__r0 < __m)                                                     \
+      {                                                                 \
+        __q0--, __r0 += (d);                                            \
+        if (__r0 >= (d))                                                \
+          if (__r0 < __m)                                               \
+            __q0--, __r0 += (d);                                        \
+      }                                                                 \
+    __r0 -= __m;                                                        \
+                                                                        \
+    (q) = (UWtype) __q1 * __ll_B | __q0;                                \
+    (r) = __r0;                                                         \
+  } while (0)
+
+
+struct gcry_mpi *mpi_alloc_secure( unsigned nlimbs )
+{
+    struct gcry_mpi *a;
+
+    a = gcry_xmalloc( sizeof *a );
+    a->d = nlimbs? mpi_alloc_limb_space( nlimbs, 1 ) : NULL;
+    a->alloced = nlimbs;
+    a->flags = 1;
+    a->nlimbs = 0;
+    a->sign = 0;
+    return a;
+}
+
+
+/* NOTE: the SECURE argument is ignored here; there is no secure-memory
+ * pool in the kernel (gcry_is_secure() is hardwired to 0 above). */
+mpi_ptr_t mpi_alloc_limb_space( unsigned nlimbs, int secure )
+{
+    size_t len = nlimbs * sizeof(mpi_limb_t);
+    mpi_ptr_t p;
+
+    p = gcry_xmalloc( len );
+    return p;
+}
+
+void mpi_assign_limb_space( struct gcry_mpi *a, mpi_ptr_t ap, unsigned nlimbs )
+{
+    mpi_free_limb_space(a->d);
+    a->d = ap;
+    a->alloced = nlimbs;
+}
+
+
+
+struct gcry_mpi * gcry_mpi_set_opaque( struct gcry_mpi * a, void *p, unsigned int nbits )
+{
+    if( !a ) {
+        a = mpi_alloc(0);
+    }
+
+    if( a->flags & 4 )
+        gcry_free( a->d );
+    else {
+        //mpi_free_limb_space(a->d);
+        gcry_free(a->d);
+    }
+
+    a->d = p;
+    a->alloced = 0;
+    a->nlimbs = 0;
+    a->sign  = nbits;	/* opaque MPIs stash their bit count in ->sign */
+    a->flags = 4;
+    return a;
+}
+
+
+/****************
+ * Note: This copy function should not interpret the MPI
+ *       but copy it transparently.
+ */
+struct gcry_mpi *mpi_copy( struct gcry_mpi * a )
+{
+    int i; 
+    struct gcry_mpi *b;
+
+    if( a && (a->flags & 4) ) {
+        void *p = gcry_xmalloc( (a->sign+7)/8 );
+        memcpy( p, a->d, (a->sign+7)/8 );
+        b = gcry_mpi_set_opaque( NULL, p, a->sign );
+    }
+    else if( a ) {
+        b = mpi_is_secure(a)? mpi_alloc_secure( a->nlimbs )
+                            : mpi_alloc( a->nlimbs );
+        b->nlimbs = a->nlimbs;
+        b->sign = a->sign;
+        b->flags  = a->flags;
+        for(i=0; i < b->nlimbs; i++ )
+            b->d[i] = a->d[i];
+    }
+    else
+        b = NULL;
+    return b;
+}
+
+
+
+
+mpi_limb_t _gcry_mpih_sub_1(mpi_ptr_t res_ptr,  mpi_ptr_t s1_ptr, size_t s1_size, mpi_limb_t s2_limb )
+{
+    mpi_limb_t x;
+
+    x = *s1_ptr++;
+    s2_limb = x - s2_limb;
+    *res_ptr++ = s2_limb;
+    if( s2_limb > x ) {
+        while( --s1_size ) {
+            x = *s1_ptr++;
+            *res_ptr++ = x - 1;
+            if( x )
+                goto leave;
+        }
+        return 1;
+    }
+
+  leave:
+    if( res_ptr != s1_ptr ) {
+        size_t i;
+        for( i=0; i < s1_size-1; i++ )
+            res_ptr[i] = s1_ptr[i];
+    }
+    return 0;
+}
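+
+/* Borrow propagation in the helper above, traced with illustrative
+ * base-10 "limbs": {0,0,5} (i.e. 500) minus 1 yields limb 0 -> 9 with
+ * borrow, limb 1 -> 9 with borrow, limb 2 -> 4 where the borrow dies
+ * (x != 0 takes the `goto leave') and 0 is returned; only a borrow
+ * running off the top limb makes the function return 1.
+ */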
+
+
+/****************
+ * Resize the limb array of A to NLIMBS.  Note: GnuPG's gcry_realloc()
+ * cleared the additional space, but the krealloc() hack above does
+ * not; only the gcry_xcalloc() path below returns zeroed memory.
+ */
+static void mpi_resize(struct gcry_mpi *a, unsigned nlimbs )
+{
+    if( nlimbs <= a->alloced )
+        return; /* no need to do it */
+    /* Note: a->secure is not used - instead the realloc functions
+     * take care of it. Maybe we should drop a->secure completely
+     * and rely on a mpi_is_secure function, which would be
+     * a wrapper around gcry_is_secure
+     */
+    if( a->d )
+        a->d = gcry_xrealloc(a->d, nlimbs * sizeof(mpi_limb_t) );
+    else  /* FIXME: It may not be allocated in secure memory */
+        a->d = gcry_xcalloc( nlimbs , sizeof(mpi_limb_t) );
+    a->alloced = nlimbs;
+}
+
+
+/****************
+ * Subtract the unsigned integer V from the mpi-integer U and store the
+ * result in W.
+ */
+void mpi_sub_ui(struct gcry_mpi *w, struct gcry_mpi *u, unsigned long v )
+{
+    mpi_ptr_t wp, up;
+    size_t usize, wsize;
+    int usign, wsign;
+
+    usize = u->nlimbs;
+    usign = u->sign;
+    wsign = 0;
+
+    /* If not space for W (and possible carry), increase space.  */
+    wsize = usize + 1;
+    if( w->alloced < wsize )
+        mpi_resize(w, wsize);
+
+    /* These must be after realloc (U may be the same as W).  */
+    up = u->d;
+    wp = w->d;
+
+    if( !usize ) {  /* simple */
+        wp[0] = v;
+        wsize = v? 1:0;
+        wsign = 1;
+    }
+    else if( usign ) {  /* mpi and v are negative */
+        mpi_limb_t cy;
+        cy = _gcry_mpih_add_1(wp, up, usize, v);
+        wp[usize] = cy;
+        wsize = usize + cy;
+    }
+    else {  /* The signs are different.  Need exact comparison to determine
+             * which operand to subtract from which.  */
+        if( usize == 1 && up[0] < v ) {
+            wp[0] = v - up[0];
+            wsize = 1;
+            wsign = 1;
+        }
+        else {
+            _gcry_mpih_sub_1(wp, up, usize, v);
+            /* Size can decrease with at most one limb. */
+            wsize = usize - (wp[usize-1]==0);
+        }
+    }
+
+    w->nlimbs = wsize;
+    w->sign   = wsign;
+}
+
+void mpi_free(struct gcry_mpi *a )
+{
+    if( !a )
+        return;
+    if( a->flags & 4 )
+        gcry_free( a->d );
+    else {
+	//mpi_free_limb_space(a->d);
+        gcry_free(a->d);
+    }
+//    if( a->flags & ~7 )
+//        log_bug("invalid flag value in mpi\n");
+    gcry_free(a);
+}
+
+void mpi_add(struct gcry_mpi *w, struct gcry_mpi *u, struct gcry_mpi *v)
+{
+    mpi_ptr_t wp, up, vp;
+    size_t usize, vsize, wsize;
+    int usign, vsign, wsign;
+
+    if( u->nlimbs < v->nlimbs ) { /* Swap U and V. */
+        usize = v->nlimbs;
+        usign = v->sign;
+        vsize = u->nlimbs;
+        vsign = u->sign;
+        wsize = usize + 1;
+        RESIZE_IF_NEEDED(w, wsize);
+        /* These must be after realloc (u or v may be the same as w).  */
+        up    = v->d;
+        vp    = u->d;
+    }
+    else {
+        usize = u->nlimbs;
+        usign = u->sign;
+        vsize = v->nlimbs;
+        vsign = v->sign;
+        wsize = usize + 1;
+        RESIZE_IF_NEEDED(w, wsize);
+        /* These must be after realloc (u or v may be the same as w).  */
+        up    = u->d;
+        vp    = v->d;
+    }
+    wp = w->d;
+    wsign = 0;
+
+    if( !vsize ) {  /* simple */
+        MPN_COPY(wp, up, usize );
+        wsize = usize;
+        wsign = usign;
+    }
+    else if( usign != vsign ) { /* different sign */
+        /* This test is right since USIZE >= VSIZE */
+        if( usize != vsize ) {
+            _gcry_mpih_sub(wp, up, usize, vp, vsize);
+            wsize = usize;
+            MPN_NORMALIZE(wp, wsize);
+            wsign = usign;
+        }
+        else if( _gcry_mpih_cmp(up, vp, usize) < 0 ) {
+            _gcry_mpih_sub_n(wp, vp, up, usize);
+            wsize = usize;
+            MPN_NORMALIZE(wp, wsize);
+            if( !usign )
+                wsign = 1;
+        }
+        else {
+            _gcry_mpih_sub_n(wp, up, vp, usize);
+            wsize = usize;
+            MPN_NORMALIZE(wp, wsize);
+            if( usign )
+                wsign = 1;
+        }
+    }
+    else { /* U and V have same sign. Add them. */
+        mpi_limb_t cy = _gcry_mpih_add(wp, up, usize, vp, vsize);
+        wp[usize] = cy;
+        wsize = usize + cy;
+        if( usign )
+            wsign = 1;
+    }
+
+    w->nlimbs = wsize;
+    w->sign = wsign;
+}
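+
+/* A sign-handling example for mpi_add() above: for u = -5, v = 3
+ * (usign = 1, vsign = 0, equal limb counts) the magnitudes differ,
+ * _gcry_mpih_cmp() reports |u| > |v|, so the last of the three
+ * different-sign branches computes |u| - |v| = 2 and keeps u's sign,
+ * yielding -2; when |u| < |v| the middle branch computes |v| - |u|
+ * and gives the result the sign opposite to u's.
+ */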
+
+
+/****************
+ * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
+ * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
+ * Return the single-limb remainder. 
+ * There are no constraints on the value of the divisor.
+ *              
+ * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
+ */                      
+                              
+mpi_limb_t      
+_gcry_mpih_divmod_1( mpi_ptr_t quot_ptr,
+                        mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+                        mpi_limb_t divisor_limb)
+{                              
+    mpi_size_t i;
+    mpi_limb_t n1, n0, r;
+    int dummy;
+            
+    if( !dividend_size )
+        return 0;
+
+    /* If multiplication is much faster than division, and the
+     * dividend is large, pre-invert the divisor, and use
+     * only multiplications in the inner loop.
+     *      
+     * This test should be read:
+     * Does it ever help to use udiv_qrnnd_preinv?
+     * && Does what we save compensate for the inversion overhead?
+     */                    
+    if( UDIV_TIME > (2 * UMUL_TIME + 6)
+        && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) {
+        int normalization_steps;
+
+        count_leading_zeros( normalization_steps, divisor_limb );
+        if( normalization_steps ) {
+            mpi_limb_t divisor_limb_inverted;
+                
+            divisor_limb <<= normalization_steps;
+            
+            /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+             * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+             * most significant bit (with weight 2**N) implicit.
+             */
+            /* Special case for DIVISOR_LIMB == 100...000.  */
+            if( !(divisor_limb << 1) )
+                divisor_limb_inverted = ~(mpi_limb_t)0;
+            else
+                udiv_qrnnd(divisor_limb_inverted, dummy,
+                           -divisor_limb, 0, divisor_limb);
+
+            n1 = dividend_ptr[dividend_size - 1];
+            r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+            /* Possible optimization:
+             * if (r == 0
+             * && divisor_limb > ((n1 << normalization_steps)
+             *                 | (dividend_ptr[dividend_size - 2] >> ...)))
+             * ...one division less...
+             */
+            for( i = dividend_size - 2; i >= 0; i--) {
+                n0 = dividend_ptr[i];
+                UDIV_QRNND_PREINV( quot_ptr[i + 1], r, r,
+                                   ((n1 << normalization_steps)
+                         | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+                              divisor_limb, divisor_limb_inverted);
+                n1 = n0;
+            }
+            UDIV_QRNND_PREINV( quot_ptr[0], r, r,
+                               n1 << normalization_steps,
+                               divisor_limb, divisor_limb_inverted);
+            return r >> normalization_steps;
+        }
+        else {
+            mpi_limb_t divisor_limb_inverted;
+
+            /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+             * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+             * most significant bit (with weight 2**N) implicit.
+             */
+            /* Special case for DIVISOR_LIMB == 100...000.  */
+            if( !(divisor_limb << 1) )
+                divisor_limb_inverted = ~(mpi_limb_t) 0;
+            else
+                udiv_qrnnd(divisor_limb_inverted, dummy,
+                           -divisor_limb, 0, divisor_limb);
+
+            i = dividend_size - 1;
+            r = dividend_ptr[i];
+
+            if( r >= divisor_limb )
+                r = 0;
+            else
+                quot_ptr[i--] = 0;
+
+            for( ; i >= 0; i-- ) {
+                n0 = dividend_ptr[i];
+                UDIV_QRNND_PREINV( quot_ptr[i], r, r,
+                                   n0, divisor_limb, divisor_limb_inverted);
+            }
+            return r;
+        }
+    }
+    else {
+        if(UDIV_NEEDS_NORMALIZATION) {
+            int normalization_steps;
+
+            count_leading_zeros (normalization_steps, divisor_limb);
+            if( normalization_steps ) {
+                divisor_limb <<= normalization_steps;
+ 
+                n1 = dividend_ptr[dividend_size - 1];
+                r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+ 
+                /* Possible optimization:
+                 * if (r == 0
+                 * && divisor_limb > ((n1 << normalization_steps)
+                 *                 | (dividend_ptr[dividend_size - 2] >> ...)))
+                 * ...one division less...
+                 */
+                for( i = dividend_size - 2; i >= 0; i--) {
+                    n0 = dividend_ptr[i];
+                    udiv_qrnnd (quot_ptr[i + 1], r, r,
+                             ((n1 << normalization_steps)
+                         | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+                                divisor_limb);
+                    n1 = n0;
+                }
+                udiv_qrnnd (quot_ptr[0], r, r,
+                            n1 << normalization_steps,
+                            divisor_limb);
+                return r >> normalization_steps;
+            }
+        }
+        /* No normalization needed, either because udiv_qrnnd doesn't require
+         * it, or because DIVISOR_LIMB is already normalized.  */
+        i = dividend_size - 1;
+        r = dividend_ptr[i];
+ 
+        if(r >= divisor_limb)
+            r = 0;
+        else
+            quot_ptr[i--] = 0;
+ 
+        for(; i >= 0; i--) {
+            n0 = dividend_ptr[i];
+            udiv_qrnnd( quot_ptr[i], r, r, n0, divisor_limb );
+        }
+        return r;
+    }
+}
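+
+/* A minimal sketch of the short division the loops above implement,
+ * assuming a 32-bit limb and a 64-bit helper type (illustration only;
+ * the code above avoids the double-wide division where UDIV is slow):
+ *
+ *     u32 divmod_1_sketch(u32 *q, const u32 *u, int n, u32 d)
+ *     {
+ *         u64 r = 0;                       // running remainder, r < d
+ *         int i;
+ *         for (i = n - 1; i >= 0; i--) {
+ *             u64 t = (r << 32) | u[i];    // two-limb value r:u[i]
+ *             q[i] = (u32)(t / d);         // next quotient limb
+ *             r = t % d;                   // carried into the next step
+ *         }
+ *         return (u32)r;
+ *     }
+ */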
+
+
+mpi_limb_t
+_gcry_mpih_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+                                      mpi_limb_t divisor_limb)
+{                                     
+    mpi_size_t i;
+    mpi_limb_t n1, n0, r;
+    int dummy; 
+    
+    /* Botch: Should this be handled at all?  Rely on callers?  */
+    if( !dividend_size )
+        return 0;
+        
+    /* If multiplication is much faster than division, and the
+     * dividend is large, pre-invert the divisor, and use
+     * only multiplications in the inner loop.
+     *
+     * This test should be read:
+     *   Does it ever help to use udiv_qrnnd_preinv?
+     *     && Does what we save compensate for the inversion overhead?
+     */
+    if( UDIV_TIME > (2 * UMUL_TIME + 6)
+        && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) {
+        int normalization_steps;
+        
+        count_leading_zeros( normalization_steps, divisor_limb );
+        if( normalization_steps ) {
+            mpi_limb_t divisor_limb_inverted;
+            
+            divisor_limb <<= normalization_steps;
+            
+            /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+             * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+             * most significant bit (with weight 2**N) implicit.
+             *
+             * Special case for DIVISOR_LIMB == 100...000.
+             */
+            if( !(divisor_limb << 1) )
+                divisor_limb_inverted = ~(mpi_limb_t)0;
+            else
+                udiv_qrnnd(divisor_limb_inverted, dummy,
+                           -divisor_limb, 0, divisor_limb);
+
+            n1 = dividend_ptr[dividend_size - 1];
+            r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+            /* Possible optimization:
+             * if (r == 0
+             * && divisor_limb > ((n1 << normalization_steps)
+             *                 | (dividend_ptr[dividend_size - 2] >> ...)))
+             * ...one division less...
+             */
+            for( i = dividend_size - 2; i >= 0; i--) {
+                n0 = dividend_ptr[i];
+                UDIV_QRNND_PREINV(dummy, r, r,
+                                   ((n1 << normalization_steps)
+                          | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+                          divisor_limb, divisor_limb_inverted);
+                n1 = n0;
+            }
+            UDIV_QRNND_PREINV(dummy, r, r,
+                              n1 << normalization_steps,
+                              divisor_limb, divisor_limb_inverted);
+            return r >> normalization_steps;
+        }
+        else {
+            mpi_limb_t divisor_limb_inverted;
+
+            /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
+             * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+             * most significant bit (with weight 2**N) implicit.
+             *
+             * Special case for DIVISOR_LIMB == 100...000.
+             */
+            if( !(divisor_limb << 1) )
+                divisor_limb_inverted = ~(mpi_limb_t)0;
+            else
+                udiv_qrnnd(divisor_limb_inverted, dummy,
+                            -divisor_limb, 0, divisor_limb);
+
+            i = dividend_size - 1;
+            r = dividend_ptr[i];
+
+            if( r >= divisor_limb )
+                r = 0;
+            else
+                i--;
+
+            for( ; i >= 0; i--) {
+                n0 = dividend_ptr[i];
+                UDIV_QRNND_PREINV(dummy, r, r,
+                                  n0, divisor_limb, divisor_limb_inverted);
+            }
+            return r;
+        }
+    }
+    else {
+        if( UDIV_NEEDS_NORMALIZATION ) {
+            int normalization_steps;
+
+            count_leading_zeros(normalization_steps, divisor_limb);
+            if( normalization_steps ) {
+                divisor_limb <<= normalization_steps;
+
+                n1 = dividend_ptr[dividend_size - 1];
+                r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+                /* Possible optimization:
+                 * if (r == 0
+                 * && divisor_limb > ((n1 << normalization_steps)
+                 *                 | (dividend_ptr[dividend_size - 2] >> ...)))
+                 * ...one division less...
+                 */
+                for(i = dividend_size - 2; i >= 0; i--) {
+                    n0 = dividend_ptr[i];
+                    udiv_qrnnd (dummy, r, r,
+                                ((n1 << normalization_steps)
+                         | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+                         divisor_limb);
+                    n1 = n0;
+                }
+                udiv_qrnnd (dummy, r, r,
+                            n1 << normalization_steps,
+                            divisor_limb);
+                return r >> normalization_steps;
+            }
+        }
+        /* No normalization needed, either because udiv_qrnnd doesn't require
+         * it, or because DIVISOR_LIMB is already normalized.  */
+        i = dividend_size - 1;
+        r = dividend_ptr[i];
+
+        if(r >= divisor_limb)
+            r = 0;
+        else
+            i--;
+
+        for(; i >= 0; i--) {
+            n0 = dividend_ptr[i];
+            udiv_qrnnd (dummy, r, r, n0, divisor_limb);
+        }
+        return r;
+    }
+}
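+
+/* _gcry_mpih_mod_1 is _gcry_mpih_divmod_1 with the quotient limbs thrown
+ * away (udiv writes them into the dummy variable).  Worked example with
+ * 32-bit limbs: the two-limb value {5, 1}, i.e. 1*2^32 + 5, reduced by
+ * divisor_limb = 7 gives (4 + 5) mod 7 = 2, since 2^32 mod 7 = 4.  */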
+
+
+/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
+ * the NSIZE-DSIZE least significant quotient limbs at QP
+ * and the DSIZE long remainder at NP.  If QEXTRA_LIMBS is
+ * non-zero, generate that many fraction bits and append them after the
+ * other quotient limbs.
+ * Return the most significant limb of the quotient, this is always 0 or 1.
+ *
+ * Preconditions:
+ * 0. NSIZE >= DSIZE.
+ * 1. The most significant bit of the divisor must be set.
+ * 2. QP must either not overlap with the input operands at all, or
+ *    QP + DSIZE >= NP must hold true.  (This means that it's
+ *    possible to put the quotient in the high part of NUM, right after the
+ *    remainder in NUM.)
+ * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
+ */
+
+mpi_limb_t
+_gcry_mpih_divrem( mpi_ptr_t qp, mpi_size_t qextra_limbs,
+                      mpi_ptr_t np, mpi_size_t nsize,
+                      mpi_ptr_t dp, mpi_size_t dsize)
+{                     
+    mpi_limb_t most_significant_q_limb = 0;
+    
+    switch(dsize) {
+      case 0:
+        /* We are asked to divide by zero, so go ahead and do it!  (To make
+           the compiler not remove this statement, return the value.)  */
+        return 1 / dsize;
+
+      case 1:
+        {
+            mpi_size_t i;
+            mpi_limb_t n1;
+            mpi_limb_t d;
+
+            d = dp[0];
+            n1 = np[nsize - 1];
+
+            if( n1 >= d ) {
+                n1 -= d;
+                most_significant_q_limb = 1;
+            }
+
+            qp += qextra_limbs;
+            for( i = nsize - 2; i >= 0; i--)
+                udiv_qrnnd( qp[i], n1, n1, np[i], d );
+            qp -= qextra_limbs;
+
+            for( i = qextra_limbs - 1; i >= 0; i-- )
+                udiv_qrnnd (qp[i], n1, n1, 0, d);
+
+            np[0] = n1;
+        }
+        break;
+
+      case 2:
+        {
+            mpi_size_t i;
+            mpi_limb_t n1, n0, n2;
+            mpi_limb_t d1, d0;
+
+            np += nsize - 2;
+            d1 = dp[1];
+            d0 = dp[0];
+            n1 = np[1];
+            n0 = np[0];
+
+            if( n1 >= d1 && (n1 > d1 || n0 >= d0) ) {
+                sub_ddmmss (n1, n0, n1, n0, d1, d0);
+                most_significant_q_limb = 1;
+            }
+
+            for( i = qextra_limbs + nsize - 2 - 1; i >= 0; i-- ) {
+                mpi_limb_t q;
+                mpi_limb_t r;
+
+                if( i >= qextra_limbs )
+                    np--;
+                else
+                    np[0] = 0;
+
+                if( n1 == d1 ) {
+                    /* Q should be either 111..111 or 111..110.  Need special
+                     * treatment of this rare case as normal division would
+                     * give overflow.  */
+                    q = ~(mpi_limb_t)0;
+
+                    r = n0 + d1;
+                    if( r < d1 ) {   /* Carry in the addition? */
+                        add_ssaaaa( n1, n0, r - d0, np[0], 0, d0 );
+                        qp[i] = q;
+                        continue;
+                    }
+                    n1 = d0 - (d0 != 0?1:0);
+                    n0 = -d0;
+                }
+                else {
+                    udiv_qrnnd (q, r, n1, n0, d1);
+                    umul_ppmm (n1, n0, d0, q);
+                }
+
+                n2 = np[0];
+              q_test:
+                if( n1 > r || (n1 == r && n0 > n2) ) {
+                    /* The estimated Q was too large.  */
+                    q--;
+                    sub_ddmmss (n1, n0, n1, n0, 0, d0);
+                    r += d1;
+                    if( r >= d1 )    /* If not carry, test Q again.  */
+                        goto q_test;
+                }
+
+                qp[i] = q;
+                sub_ddmmss (n1, n0, r, n2, n1, n0);
+            }
+            np[1] = n1;
+            np[0] = n0;
+        }
+        break;
+
+      default:
+        {
+            mpi_size_t i;
+            mpi_limb_t dX, d1, n0;
+
+            np += nsize - dsize;
+            dX = dp[dsize - 1];
+            d1 = dp[dsize - 2];
+            n0 = np[dsize - 1];
+
+            if( n0 >= dX ) {
+                if(n0 > dX || _gcry_mpih_cmp(np, dp, dsize - 1) >= 0 ) {
+                    _gcry_mpih_sub_n(np, np, dp, dsize);
+                    n0 = np[dsize - 1];
+                    most_significant_q_limb = 1;
+                }
+            }
+
+            for( i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
+                mpi_limb_t q;
+                mpi_limb_t n1, n2;
+                mpi_limb_t cy_limb;
+
+                if( i >= qextra_limbs ) {
+                    np--;
+                    n2 = np[dsize];
+                }
+                else {
+                    n2 = np[dsize - 1];
+                    MPN_COPY_DECR (np + 1, np, dsize - 1);
+                    np[0] = 0;
+                }
+
+                if( n0 == dX ) {
+                    /* This might over-estimate q, but it's probably not worth
+                     * the extra code here to find out.  */
+                    q = ~(mpi_limb_t)0;
+                }
+                else {
+                    mpi_limb_t r;
+
+                    udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
+                    umul_ppmm(n1, n0, d1, q);
+
+                    while( n1 > r || (n1 == r && n0 > np[dsize - 2])) {
+                        q--;
+                        r += dX;
+                        if( r < dX ) /* I.e. "carry in previous addition?" */
+                            break;
+                        n1 -= n0 < d1;
+                        n0 -= d1;
+                    }
+                }
+
+                /* Possible optimization: we already have (q * n0) and (1 * n1)
+                 * after the calculation of q.  Taking advantage of that, this
+                 * loop could be made two iterations shorter.  */
+                cy_limb = _gcry_mpih_submul_1(np, dp, dsize, q);
+
+                if( n2 != cy_limb ) {
+                    _gcry_mpih_add_n(np, np, dp, dsize);
+                    q--;
+                }
+
+                qp[i] = q;
+                n0 = np[dsize - 1];
+            }
+        }
+    }
+
+    return most_significant_q_limb;
+}
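+
+/* The default case above is essentially Knuth's Algorithm D (TAOCP vol. 2,
+ * sec. 4.3.1): each quotient limb is estimated from the top limbs of the
+ * running remainder and the top divisor limb dX, then corrected.  Because
+ * the divisor is normalized (precondition 1) the estimate overshoots by at
+ * most 2, and the rare leftover error is fixed by the add-back after
+ * _gcry_mpih_submul_1.  Base-10 analogy: estimating 89/19 from leading
+ * digits as 8/1 = 8 is far from the true quotient 4, but after scaling
+ * both by 5 (445/95) the same leading-digit estimate, 44/9 = 4, is exact.
+ */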
+
+
+
+
+static void _gcry_mpi_tdiv_qr(struct gcry_mpi *quot, struct gcry_mpi *rem, struct gcry_mpi *num, struct gcry_mpi *den)
+{
+    mpi_ptr_t np, dp;
+    mpi_ptr_t qp, rp;
+    mpi_size_t nsize = num->nlimbs;
+    mpi_size_t dsize = den->nlimbs;
+    mpi_size_t qsize, rsize;
+    mpi_size_t sign_remainder = num->sign;
+    mpi_size_t sign_quotient = num->sign ^ den->sign;
+    unsigned normalization_steps;
+    mpi_limb_t q_limb;
+    mpi_ptr_t marker[5];
+    int markidx=0;
+
+    /* Ensure space is enough for quotient and remainder.
+     * We need space for an extra limb in the remainder, because it's
+     * up-shifted (normalized) below.  */
+    rsize = nsize + 1;
+    mpi_resize( rem, rsize);
+
+    qsize = rsize - dsize;        /* qsize cannot be bigger than this.  */
+    if( qsize <= 0 ) {
+        if( num != rem ) {
+            rem->nlimbs = num->nlimbs;
+            rem->sign = num->sign;
+            MPN_COPY(rem->d, num->d, nsize);
+        }
+        if( quot ) {
+            /* This needs to follow the assignment to rem, in case the
+             * numerator and quotient are the same.  */
+            quot->nlimbs = 0;
+            quot->sign = 0;
+        }
+        return;
+    }
+
+    if( quot )
+        mpi_resize( quot, qsize);
+
+    /* Read pointers here, when reallocation is finished.  */
+    np = num->d;
+    dp = den->d;
+    rp = rem->d;
+
+    /* Optimize division by a single-limb divisor.  */
+    if( dsize == 1 ) {
+        mpi_limb_t rlimb;
+        if( quot ) {
+            qp = quot->d;
+            rlimb = _gcry_mpih_divmod_1( qp, np, nsize, dp[0] );
+            qsize -= qp[qsize - 1] == 0;
+            quot->nlimbs = qsize;
+            quot->sign = sign_quotient;
+        }
+        else
+            rlimb = _gcry_mpih_mod_1( np, nsize, dp[0] );
+        rp[0] = rlimb;
+        rsize = rlimb != 0?1:0;
+        rem->nlimbs = rsize;
+        rem->sign = sign_remainder;
+        return;
+    }
+
+
+    if( quot ) {
+        qp = quot->d;
+        /* Make sure QP and NP point to different objects.  Otherwise the
+         * numerator would be gradually overwritten by the quotient limbs.  */
+        if(qp == np) { /* Copy NP object to temporary space.  */
+            np = marker[markidx++] = mpi_alloc_limb_space(nsize,
+                                                          mpi_is_secure(quot));
+            MPN_COPY(np, qp, nsize);
+        }
+    }
+    else /* Put quotient at top of remainder. */
+        qp = rp + dsize;
+
+    count_leading_zeros( normalization_steps, dp[dsize - 1] );
+
+    /* Normalize the denominator, i.e. make its most significant bit set by
+     * shifting it NORMALIZATION_STEPS bits to the left.  Also shift the
+     * numerator the same number of steps (to keep the quotient the same!).
+     */
+    if( normalization_steps ) {
+        mpi_ptr_t tp;
+        mpi_limb_t nlimb;
+
+        /* Shift up the denominator setting the most significant bit of
+         * the most significant word.  Use temporary storage not to clobber
+         * the original contents of the denominator.  */
+        tp = marker[markidx++] = mpi_alloc_limb_space(dsize,mpi_is_secure(den));
+        _gcry_mpih_lshift( tp, dp, dsize, normalization_steps );
+        dp = tp;
+
+        /* Shift up the numerator, possibly introducing a new most
+         * significant word.  Move the shifted numerator into the
+         * remainder as we go.  */
+        nlimb = _gcry_mpih_lshift(rp, np, nsize, normalization_steps);
+        if( nlimb ) {
+            rp[nsize] = nlimb;
+            rsize = nsize + 1;
+        }
+        else
+            rsize = nsize;
+    }
+    else {
+        /* The denominator is already normalized, as required.  Copy it to
+         * temporary space if it overlaps with the quotient or remainder.  */
+        if( dp == rp || (quot && (dp == qp))) {
+            mpi_ptr_t tp;
+
+            tp = marker[markidx++] = mpi_alloc_limb_space(dsize, mpi_is_secure(den));
+            MPN_COPY( tp, dp, dsize );
+            dp = tp;
+        }
+
+        /* Move the numerator to the remainder.  */
+        if( rp != np )
+            MPN_COPY(rp, np, nsize);
+
+        rsize = nsize;
+    }
+
+    q_limb = _gcry_mpih_divrem( qp, 0, rp, rsize, dp, dsize );
+
+    if( quot ) {
+        qsize = rsize - dsize;
+        if(q_limb) {
+            qp[qsize] = q_limb;
+            qsize += 1;
+        }
+
+        quot->nlimbs = qsize;
+        quot->sign = sign_quotient;
+    }
+
+    rsize = dsize;
+    MPN_NORMALIZE (rp, rsize);
+
+    if( normalization_steps && rsize ) {
+        _gcry_mpih_rshift(rp, rp, rsize, normalization_steps);
+        rsize -= rp[rsize - 1] == 0?1:0;
+    }
+
+    rem->nlimbs = rsize;
+    rem->sign   = sign_remainder;
+    while( markidx )
+        gcry_free(marker[--markidx]);
+}
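+
+/* "tdiv" is truncating division: the quotient rounds toward zero and the
+ * remainder takes the sign of the numerator.  E.g. -7 tdiv 2 yields
+ * quotient -3 and remainder -1, where floor division would yield -4
+ * and 1.  */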
+
+/* If den == quot, den needs temporary storage.
+ * If den == rem, den needs temporary storage.
+ * If num == quot, num needs temporary storage.
+ * If den has temporary storage, it can be normalized while being copied,
+ *   i.e no extra storage should be allocated.
+ */
+
+static void _gcry_mpi_tdiv_r(struct gcry_mpi *rem, struct gcry_mpi *num, struct gcry_mpi *den)
+{
+    _gcry_mpi_tdiv_qr(NULL, rem, num, den );
+}
+
+
+
+void mpi_fdiv_r(struct gcry_mpi *rem, struct gcry_mpi *dividend, struct gcry_mpi *divisor )
+{
+    int divisor_sign = divisor->sign;
+    struct gcry_mpi *temp_divisor = NULL;
+
+    /* We need the original value of the divisor after the remainder has been
+     * preliminary calculated.  We have to copy it to temporary space if it's
+     * the same variable as REM.  */
+    if( rem == divisor ) {
+        temp_divisor = mpi_copy( divisor );
+        divisor = temp_divisor;
+    }
+
+    _gcry_mpi_tdiv_r( rem, dividend, divisor );
+
+    if( ((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs )
+        mpi_add( rem, rem, divisor);
+
+    if( temp_divisor )
+        mpi_free(temp_divisor);
+}
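+
+/* Worked example: fdiv_r with dividend -7 and divisor 3.  The truncating
+ * remainder is -1 (sign of the dividend); the operand signs differ and
+ * the remainder is non-zero, so the divisor is added: -1 + 3 = 2, the
+ * floor-division remainder (floor(-7/3) = -3 and -7 - 3*(-3) = 2).  */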
+
+
+
+void
+_gcry_mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size )
+{
+    mpi_size_t i;
+    mpi_limb_t cy_limb;
+    mpi_limb_t v_limb;
+    
+    /* Multiply by the first limb in V separately, as the result can be
+     * stored (not added) to PROD.  We also avoid a loop for zeroing.  */
+    v_limb = up[0];
+    if( v_limb <= 1 ) {
+        if( v_limb == 1 )
+            MPN_COPY( prodp, up, size );
+        else
+            MPN_ZERO(prodp, size);
+        cy_limb = 0;
+    }   
+    else
+        cy_limb = _gcry_mpih_mul_1( prodp, up, size, v_limb );
+        
+    prodp[size] = cy_limb;
+    prodp++;
+
+    /* For each iteration in the outer loop, multiply one limb from
+     * U with one limb from V, and add it to PROD.  */
+    for( i=1; i < size; i++) {
+        v_limb = up[i];
+        if( v_limb <= 1 ) {
+            cy_limb = 0;
+            if( v_limb == 1 )
+                cy_limb = _gcry_mpih_add_n(prodp, prodp, up, size);
+        }
+        else
+            cy_limb = _gcry_mpih_addmul_1(prodp, up, size, v_limb);
+
+        prodp[size] = cy_limb;
+        prodp++;
+    }
+}
+
+
+/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
+ * both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
+ * always stored.  Return the most significant limb.
+ *
+ * Argument constraints:
+ * 1. PRODP != UP and PRODP != VP, i.e. the destination
+ *    must be distinct from the multiplier and the multiplicand.
+ *
+ *
+ * Handle simple cases with traditional multiplication.
+ *
+ * This is the most critical code of multiplication.  All multiplies rely
+ * on this, both small and huge.  Small ones arrive here immediately.  Huge
+ * ones arrive here as this is the base case for Karatsuba's recursive
+ * algorithm below.
+ */
+
+static mpi_limb_t
+mul_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up,
+                                 mpi_ptr_t vp, mpi_size_t size)
+{
+    mpi_size_t i;
+    mpi_limb_t cy;
+    mpi_limb_t v_limb;
+
+    /* Multiply by the first limb in V separately, as the result can be
+     * stored (not added) to PROD.  We also avoid a loop for zeroing.  */
+    v_limb = vp[0];
+    if( v_limb <= 1 ) {
+        if( v_limb == 1 )
+            MPN_COPY( prodp, up, size );
+        else
+            MPN_ZERO( prodp, size );
+        cy = 0;
+    }
+    else
+        cy = _gcry_mpih_mul_1( prodp, up, size, v_limb );
+
+    prodp[size] = cy;
+    prodp++;
+
+    /* For each iteration in the outer loop, multiply one limb from
+     * U with one limb from V, and add it to PROD.  */
+    for( i = 1; i < size; i++ ) {
+        v_limb = vp[i];
+        if( v_limb <= 1 ) {
+            cy = 0;
+            if( v_limb == 1 )
+               cy = _gcry_mpih_add_n(prodp, prodp, up, size);
+        }
+        else
+            cy = _gcry_mpih_addmul_1(prodp, up, size, v_limb);
+
+        prodp[size] = cy;
+        prodp++;
+    }
+
+    return cy;
+}
+
+
+
+void
+_gcry_mpih_sqr_n( mpi_ptr_t prodp,
+                  mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
+{
+    if( size & 1 ) {
+        /* The size is odd, and the code below doesn't handle that.
+         * Multiply the least significant (size - 1) limbs with a recursive
+         * call, and handle the most significant limb of S1 and S2
+         * separately.
+         * A slightly faster way to do this would be to make the Karatsuba
+         * code below behave as if the size were even, and let it check for
+         * odd size in the end.  I.e., in essence move this code to the end.
+         * Doing so would save us a recursive call, and potentially make the
+         * stack grow a lot less.
+         */
+        mpi_size_t esize = size - 1;       /* even size */
+        mpi_limb_t cy_limb;
+
+        MPN_SQR_N_RECURSE( prodp, up, esize, tspace );
+        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, up[esize] );
+        prodp[esize + esize] = cy_limb;
+        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, size, up[esize] );
+
+        prodp[esize + size] = cy_limb;
+    }
+    else {
+        mpi_size_t hsize = size >> 1;
+        mpi_limb_t cy;
+
+        /* Product H.      ________________  ________________
+         *                |_____U1 x U1____||____U0 x U0_____|
+         * Put result in upper part of PROD and pass low part of TSPACE
+         * as new TSPACE.
+         */
+        MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);
+
+        /* Product M.      ________________
+         *                |_(U1-U0)(U0-U1)_|
+         */
+        if( _gcry_mpih_cmp( up + hsize, up, hsize) >= 0 )
+            _gcry_mpih_sub_n( prodp, up + hsize, up, hsize);
+        else
+            _gcry_mpih_sub_n (prodp, up, up + hsize, hsize);
+
+        /* Read temporary operands from low part of PROD.
+         * Put result in low part of TSPACE using upper part of TSPACE
+         * as new TSPACE.  */
+        MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);
+
+        /* Add/copy product H  */
+        MPN_COPY(prodp + hsize, prodp + size, hsize);
+        cy = _gcry_mpih_add_n(prodp + size, prodp + size,
+                           prodp + size + hsize, hsize);
+
+        /* Subtract product M: in the squaring case M = -(U1-U0)^2 is
+         * never positive (see the identity note after this function).  */
+        cy -= _gcry_mpih_sub_n (prodp + hsize, prodp + hsize, tspace, size);
+
+        /* Product L.      ________________  ________________
+         *                |________________||____U0 x U0_____|
+         * Read temporary operands from low part of PROD.
+         * Put result in low part of TSPACE using upper part of TSPACE
+         * as new TSPACE.  */
+        MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);
+
+        /* Add/copy Product L (twice).  */
+        cy += _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace, size);
+        if( cy )
+            _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size,
+                                                            hsize, cy);
+
+        MPN_COPY(prodp, tspace, hsize);
+        cy = _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
+        if( cy )
+            _gcry_mpih_add_1 (prodp + size, prodp + size, size, 1);
+    }
+}
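+
+/* The even-size branch above is the Karatsuba identity from mul_n below,
+ * specialized to V = U: with U = U0 + U1*B^n,
+ *
+ *     U^2 = (B^2n + B^n)*U1^2 + B^n*(U1-U0)(U0-U1) + (B^n + 1)*U0^2
+ *
+ * which needs only the three half-size squarings/products H, M and L.  */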
+
+
+static void
+mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
+                        mpi_size_t size, mpi_ptr_t tspace )
+{
+    if( size & 1 ) {
+      /* The size is odd, and the code below doesn't handle that.
+       * Multiply the least significant (size - 1) limbs with a recursive
+       * call, and handle the most significant limb of S1 and S2
+       * separately.
+       * A slightly faster way to do this would be to make the Karatsuba
+       * code below behave as if the size were even, and let it check for
+       * odd size in the end.  I.e., in essence move this code to the end.
+       * Doing so would save us a recursive call, and potentially make the
+       * stack grow a lot less.
+       */
+      mpi_size_t esize = size - 1;       /* even size */
+      mpi_limb_t cy_limb;
+
+      MPN_MUL_N_RECURSE( prodp, up, vp, esize, tspace );
+      cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, vp[esize] );
+      prodp[esize + esize] = cy_limb;
+      cy_limb = _gcry_mpih_addmul_1( prodp + esize, vp, size, up[esize] );
+      prodp[esize + size] = cy_limb;
+    }
+    else {
+        /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
+         *
+         * Split U in two pieces, U1 and U0, such that
+         * U = U0 + U1*(B**n),
+         * and V in V1 and V0, such that
+         * V = V0 + V1*(B**n).
+         *
+         * UV is then computed recursively using the identity
+         *
+         *        2n   n          n                     n
+         * UV = (B  + B )U V  +  B (U -U )(V -V )  +  (B + 1)U V
+         *                1 1        1  0   0  1              0 0
+         *
+         * Where B = 2**BITS_PER_MP_LIMB.
+         */
+        mpi_size_t hsize = size >> 1;
+        mpi_limb_t cy;
+        int negflg;
+
+        /* Product H.      ________________  ________________
+         *                |_____U1 x V1____||____U0 x V0_____|
+         * Put result in upper part of PROD and pass low part of TSPACE
+         * as new TSPACE.
+         */
+        MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace);
+
+        /* Product M.      ________________
+         *                |_(U1-U0)(V0-V1)_|
+         */
+        if( _gcry_mpih_cmp(up + hsize, up, hsize) >= 0 ) {
+            _gcry_mpih_sub_n(prodp, up + hsize, up, hsize);
+            negflg = 0;
+        }
+        else {
+            _gcry_mpih_sub_n(prodp, up, up + hsize, hsize);
+            negflg = 1;
+        }
+        if( _gcry_mpih_cmp(vp + hsize, vp, hsize) >= 0 ) {
+            _gcry_mpih_sub_n(prodp + hsize, vp + hsize, vp, hsize);
+            negflg ^= 1;
+        }
+        else {
+            _gcry_mpih_sub_n(prodp + hsize, vp, vp + hsize, hsize);
+            /* No change of NEGFLG.  */
+        }
+        /* Read temporary operands from low part of PROD.
+         * Put result in low part of TSPACE using upper part of TSPACE
+         * as new TSPACE.
+         */
+        MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size);
+
+        /* Add/copy product H. */
+        MPN_COPY (prodp + hsize, prodp + size, hsize);
+        cy = _gcry_mpih_add_n( prodp + size, prodp + size,
+                            prodp + size + hsize, hsize);
+
+        /* Add product M (if NEGFLG M is a negative number) */
+        if(negflg)
+            cy -= _gcry_mpih_sub_n(prodp + hsize, prodp + hsize, tspace, size);
+        else
+            cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);
+
+        /* Product L.      ________________  ________________
+         *                |________________||____U0 x V0_____|
+         * Read temporary operands from low part of PROD.
+         * Put result in low part of TSPACE using upper part of TSPACE
+         * as new TSPACE.
+         */
+        MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);
+
+        /* Add/copy Product L (twice) */
+
+        cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);
+        if( cy )
+          _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size, hsize, cy);
+
+        MPN_COPY(prodp, tspace, hsize);
+        cy = _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize);
+        if( cy )
+            _gcry_mpih_add_1(prodp + size, prodp + size, size, 1);
+    }
+}
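+
+/* Worked example of the identity, in base B = 10 with n = 1:
+ * U = 23 (U1=2, U0=3), V = 14 (V1=1, V0=4).
+ *   H = U1*V1 = 2,  L = U0*V0 = 12,  M = (U1-U0)(V0-V1) = (-1)(3) = -3.
+ *   UV = (100+10)*2 + 10*(-3) + 11*12 = 220 - 30 + 132 = 322 = 23*14.
+ * Three half-size multiplies (H, M, L) replace the four of the schoolbook
+ * method, giving the O(n^log2(3)) running time.  */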
+
+
+void
+_gcry_mpih_mul_karatsuba_case( mpi_ptr_t prodp,
+                                  mpi_ptr_t up, mpi_size_t usize,
+                                  mpi_ptr_t vp, mpi_size_t vsize,
+                                  struct karatsuba_ctx *ctx )
+{
+    mpi_limb_t cy;
+
+    if( !ctx->tspace || ctx->tspace_size < vsize ) {
+        if( ctx->tspace )
+            mpi_free_limb_space( ctx->tspace );
+        ctx->tspace = mpi_alloc_limb_space( 2 * vsize,
+                                       gcry_is_secure( up ) || gcry_is_secure( vp ) );
+        ctx->tspace_size = vsize;
+    }
+
+    MPN_MUL_N_RECURSE( prodp, up, vp, vsize, ctx->tspace );
+
+    prodp += vsize;
+    up += vsize;
+    usize -= vsize;
+    if( usize >= vsize ) {
+        if( !ctx->tp || ctx->tp_size < vsize ) {
+            if( ctx->tp )
+                mpi_free_limb_space( ctx->tp );
+            ctx->tp = mpi_alloc_limb_space( 2 * vsize, gcry_is_secure( up )
+                                                      || gcry_is_secure( vp ) );
+            ctx->tp_size = vsize;
+        }
+
+        do {
+            MPN_MUL_N_RECURSE( ctx->tp, up, vp, vsize, ctx->tspace );
+            cy = _gcry_mpih_add_n( prodp, prodp, ctx->tp, vsize );
+            _gcry_mpih_add_1( prodp + vsize, ctx->tp + vsize, vsize, cy );
+            prodp += vsize;
+            up += vsize;
+            usize -= vsize;
+        } while( usize >= vsize );
+    }
+
+    if( usize ) {
+        if( usize < KARATSUBA_THRESHOLD ) {
+            _gcry_mpih_mul( ctx->tspace, vp, vsize, up, usize );
+        }
+        else {
+            if( !ctx->next ) {
+                ctx->next = gcry_xcalloc( 1, sizeof *ctx );
+            }
+            _gcry_mpih_mul_karatsuba_case( ctx->tspace,
+                                        vp, vsize,
+                                        up, usize,
+                                        ctx->next );
+        }
+
+        cy = _gcry_mpih_add_n( prodp, prodp, ctx->tspace, vsize);
+        _gcry_mpih_add_1( prodp + vsize, ctx->tspace + vsize, usize, cy );
+    }
+}
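+
+/* For operands of unequal length, the loop above slices the longer U into
+ * vsize-limb chunks, multiplies each chunk by V with the balanced
+ * recursion, and accumulates the partial products at increasing limb
+ * offsets: schoolbook chunking on the outside, Karatsuba inside.  */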
+
+
+void
+_gcry_mpih_release_karatsuba_ctx( struct karatsuba_ctx *ctx )
+{
+    struct karatsuba_ctx *ctx2;
+
+    if( ctx->tp )
+        mpi_free_limb_space( ctx->tp );
+    if( ctx->tspace )
+        mpi_free_limb_space( ctx->tspace );
+    for( ctx=ctx->next; ctx; ctx = ctx2 ) {
+        ctx2 = ctx->next;
+        if( ctx->tp )
+            mpi_free_limb_space( ctx->tp );
+        if( ctx->tspace )
+            mpi_free_limb_space( ctx->tspace );
+        gcry_free( ctx );
+    }
+}
+
+/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
+ * and v (pointed to by VP, with VSIZE limbs), and store the result at
+ * PRODP.  USIZE + VSIZE limbs are always stored, though the most
+ * significant limb may be zero even when the input operands are
+ * normalized.  Return the most significant limb of the result.
+ *
+ * NOTE: The space pointed to by PRODP is overwritten before finished
+ * with U and V, so overlap is an error.
+ *
+ * Argument constraints:
+ * 1. USIZE >= VSIZE.
+ * 2. PRODP != UP and PRODP != VP, i.e. the destination
+ *    must be distinct from the multiplier and the multiplicand.
+ */
+
+mpi_limb_t
+_gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
+                   mpi_ptr_t vp, mpi_size_t vsize)
+{
+    mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
+    mpi_limb_t cy;
+    struct karatsuba_ctx ctx;
+
+    if( vsize < KARATSUBA_THRESHOLD ) {
+        mpi_size_t i;
+        mpi_limb_t v_limb;
+
+        if( !vsize )
+            return 0;
+
+        /* Multiply by the first limb in V separately, as the result can be
+         * stored (not added) to PROD.  We also avoid a loop for zeroing.  */
+        v_limb = vp[0];
+        if( v_limb <= 1 ) {
+            if( v_limb == 1 )
+                MPN_COPY( prodp, up, usize );
+            else
+                MPN_ZERO( prodp, usize );
+            cy = 0;
+        }
+        else
+            cy = _gcry_mpih_mul_1( prodp, up, usize, v_limb );
+
+        prodp[usize] = cy;
+        prodp++;
+
+        /* For each iteration in the outer loop, multiply one limb from
+         * U with one limb from V, and add it to PROD.  */
+        for( i = 1; i < vsize; i++ ) {
+            v_limb = vp[i];
+            if( v_limb <= 1 ) {
+                cy = 0;
+                if( v_limb == 1 )
+                   cy = _gcry_mpih_add_n(prodp, prodp, up, usize);
+            }
+            else
+                cy = _gcry_mpih_addmul_1(prodp, up, usize, v_limb);
+
+            prodp[usize] = cy;
+            prodp++;
+        }
+
+        return cy;
+    }
+
+    memset( &ctx, 0, sizeof ctx );
+    _gcry_mpih_mul_karatsuba_case( prodp, up, usize, vp, vsize, &ctx );
+    _gcry_mpih_release_karatsuba_ctx( &ctx );
+    return *prod_endp;
+}
+
+
+
+
+
+
+
+
+/****************
+ * RES = BASE ^ EXP mod MOD
+ */
+void mpi_powm(struct gcry_mpi *res, struct gcry_mpi *base, struct gcry_mpi *exp, struct gcry_mpi *mod)
+{
+    mpi_ptr_t  rp, ep, mp, bp;
+    mpi_size_t esize, msize, bsize, rsize;
+    int        esign, msign, bsign, rsign;
+    int        esec,  msec,  bsec,  rsec;
+    mpi_size_t size;
+    int mod_shift_cnt;
+    int negative_result;
+    mpi_ptr_t mp_marker=NULL, bp_marker=NULL, ep_marker=NULL;
+    mpi_ptr_t xp_marker=NULL;
+    int assign_rp=0;
+    mpi_ptr_t tspace = NULL;
+    mpi_size_t tsize=0;   /* to avoid compiler warning */
+			  /* fixme: we should check that the warning is spurious */
+
+    esize = exp->nlimbs;
+    msize = mod->nlimbs;
+    size = 2 * msize;
+    esign = exp->sign;
+    msign = mod->sign;
+
+    esec = mpi_is_secure(exp);
+    msec = mpi_is_secure(mod);
+    bsec = mpi_is_secure(base);
+    rsec = mpi_is_secure(res);
+
+    rp = res->d;
+    ep = exp->d;
+
+    if( !msize )
+	msize = 1 / msize;	    /* provoke a signal */
+
+    if( !esize ) {
+	/* Exponent is zero, so the result is 1 mod MOD: 0 if MOD is 1,
+	 * else 1.  */
+	rp[0] = 1;
+	res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
+	res->sign = 0;
+	goto leave;
+    }
+
+    /* Normalize MOD (i.e. make its most significant bit set) as required by
+     * mpn_divrem.  This will make the intermediate values in the calculation
+     * slightly larger, but the correct result is obtained after a final
+     * reduction using the original MOD value.	*/
+    mp = mp_marker = mpi_alloc_limb_space(msize, msec);
+    count_leading_zeros( mod_shift_cnt, mod->d[msize-1] );
+    if( mod_shift_cnt )
+	_gcry_mpih_lshift( mp, mod->d, msize, mod_shift_cnt );
+    else
+	MPN_COPY( mp, mod->d, msize );
+
+    bsize = base->nlimbs;
+    bsign = base->sign;
+    if( bsize > msize ) { /* The base is larger than the modulus.  Reduce it. */
+	/* Allocate (BSIZE + 1) with space for remainder and quotient.
+	 * (The quotient is (bsize - msize + 1) limbs.)  */
+	bp = bp_marker = mpi_alloc_limb_space( bsize + 1, bsec );
+	MPN_COPY( bp, base->d, bsize );
+	/* We don't care about the quotient, store it above the remainder,
+	 * at BP + MSIZE.  */
+	_gcry_mpih_divrem( bp + msize, 0, bp, bsize, mp, msize );
+	bsize = msize;
+	/* Canonicalize the base, since we are going to multiply with it
+	 * quite a few times.  */
+	MPN_NORMALIZE( bp, bsize );
+    }
+    else
+	bp = base->d;
+
+    if( !bsize ) {
+	res->nlimbs = 0;
+	res->sign = 0;
+	goto leave;
+    }
+
+    if( res->alloced < size ) {
+	/* We have to allocate more space for RES.  If any of the input
+	 * parameters are identical to RES, defer deallocation of the old
+	 * space.  */
+	if( rp == ep || rp == mp || rp == bp ) {
+	    rp = mpi_alloc_limb_space( size, rsec );
+	    assign_rp = 1;
+	}
+	else {
+	    mpi_resize( res, size );
+	    rp = res->d;
+	}
+    }
+    else { /* Make BASE, EXP and MOD not overlap with RES.  */
+	if( rp == bp ) {
+	    /* RES and BASE are identical.  Allocate temp. space for BASE.  */
+	    assert( !bp_marker );
+	    bp = bp_marker = mpi_alloc_limb_space( bsize, bsec );
+	    MPN_COPY(bp, rp, bsize);
+	}
+	if( rp == ep ) {
+	    /* RES and EXP are identical.  Allocate temp. space for EXP.  */
+	    ep = ep_marker = mpi_alloc_limb_space( esize, esec );
+	    MPN_COPY(ep, rp, esize);
+	}
+	if( rp == mp ) {
+	    /* RES and MOD are identical.  Allocate temporary space for MOD.*/
+	    assert( !mp_marker );
+	    mp = mp_marker = mpi_alloc_limb_space( msize, msec );
+	    MPN_COPY(mp, rp, msize);
+	}
+    }
+
+    MPN_COPY( rp, bp, bsize );
+    rsize = bsize;
+    rsign = bsign;
+
+    {
+	mpi_size_t i;
+	mpi_ptr_t xp = xp_marker = mpi_alloc_limb_space( 2 * (msize + 1), msec );
+	int c;
+	mpi_limb_t e;
+	mpi_limb_t carry_limb;
+	struct karatsuba_ctx karactx;
+
+	memset( &karactx, 0, sizeof karactx );
+	negative_result = (ep[0] & 1) && base->sign;
+
+	i = esize - 1;
+	e = ep[i];
+	count_leading_zeros (c, e);
+	e = (e << c) << 1;     /* shift the exp bits to the left, lose msb */
+	c = BITS_PER_MPI_LIMB - 1 - c;
+
+	/* Main loop.
+	 *
+	 * Make the result be pointed to alternately by XP and RP.  This
+	 * helps us avoid block copying, which would otherwise be necessary
+	 * with the overlap restrictions of _gcry_mpih_divrem.  With 50% probability
+	 * the result after this loop will be in the area originally pointed
+	 * by RP (==RES->d), and with 50% probability in the area originally
+	 * pointed to by XP.
+	 */
+
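+	/* What follows is left-to-right binary exponentiation: for each
+	 * exponent bit, MSB first, square the accumulator, and multiply by
+	 * the base when the bit is set; (mpi_limb_signed_t)e < 0 tests the
+	 * current top bit of e.  Example: exp = 13 = 1101b, starting from
+	 * r = b after the leading 1: bit 1 -> r = r^2*b = b^3; bit 0 ->
+	 * r = r^2 = b^6; bit 1 -> r = r^2*b = b^13.  Every step is reduced
+	 * mod MOD to keep operands at most msize limbs.  */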
+	for(;;) {
+	    while( c ) {
+		mpi_ptr_t tp;
+		mpi_size_t xsize;
+
+		/*mpih_mul_n(xp, rp, rp, rsize);*/
+		if( rsize < KARATSUBA_THRESHOLD )
+		    _gcry_mpih_sqr_n_basecase( xp, rp, rsize );
+		else {
+		    if( !tspace ) {
+			tsize = 2 * rsize;
+			tspace = mpi_alloc_limb_space( tsize, 0 );
+		    }
+		    else if( tsize < (2*rsize) ) {
+			mpi_free_limb_space( tspace );
+			tsize = 2 * rsize;
+			tspace = mpi_alloc_limb_space( tsize, 0 );
+		    }
+		    _gcry_mpih_sqr_n( xp, rp, rsize, tspace );
+		}
+
+		xsize = 2 * rsize;
+		if( xsize > msize ) {
+		    _gcry_mpih_divrem(xp + msize, 0, xp, xsize, mp, msize);
+		    xsize = msize;
+		}
+
+		tp = rp; rp = xp; xp = tp;
+		rsize = xsize;
+
+		if( (mpi_limb_signed_t)e < 0 ) {
+		    /*mpih_mul( xp, rp, rsize, bp, bsize );*/
+		    if( bsize < KARATSUBA_THRESHOLD ) {
+			_gcry_mpih_mul( xp, rp, rsize, bp, bsize );
+		    }
+		    else {
+			_gcry_mpih_mul_karatsuba_case(
+				     xp, rp, rsize, bp, bsize, &karactx );
+		    }
+
+		    xsize = rsize + bsize;
+		    if( xsize > msize ) {
+			_gcry_mpih_divrem(xp + msize, 0, xp, xsize, mp, msize);
+			xsize = msize;
+		    }
+
+		    tp = rp; rp = xp; xp = tp;
+		    rsize = xsize;
+		}
+		e <<= 1;
+		c--;
+	    }
+
+	    i--;
+	    if( i < 0 )
+		break;
+	    e = ep[i];
+	    c = BITS_PER_MPI_LIMB;
+	}
+
+	/* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT
+	 * steps.  Adjust the result by reducing it with the original MOD.
+	 *
+	 * Also make sure the result is put in RES->d (where it already
+	 * might be, see above).
+	 */
+	if( mod_shift_cnt ) {
+	    carry_limb = _gcry_mpih_lshift( res->d, rp, rsize, mod_shift_cnt);
+	    rp = res->d;
+	    if( carry_limb ) {
+		rp[rsize] = carry_limb;
+		rsize++;
+	    }
+	}
+	else {
+	    MPN_COPY( res->d, rp, rsize);
+	    rp = res->d;
+	}
+
+	if( rsize >= msize ) {
+	    _gcry_mpih_divrem(rp + msize, 0, rp, rsize, mp, msize);
+	    rsize = msize;
+	}
+
+	/* Remove any leading zero words from the result.  */
+	if( mod_shift_cnt )
+	    _gcry_mpih_rshift( rp, rp, rsize, mod_shift_cnt);
+	MPN_NORMALIZE (rp, rsize);
+
+	_gcry_mpih_release_karatsuba_ctx( &karactx );
+    }
+
+    if( negative_result && rsize ) {
+	if( mod_shift_cnt )
+	    _gcry_mpih_rshift( mp, mp, msize, mod_shift_cnt);
+	_gcry_mpih_sub( rp, mp, msize, rp, rsize);
+	rsize = msize;
+	rsign = msign;
+	MPN_NORMALIZE(rp, rsize);
+    }
+    res->nlimbs = rsize;
+    res->sign = rsign;
+
+  leave:
+    if( assign_rp ) mpi_assign_limb_space( res, rp, size );
+    if( mp_marker ) mpi_free_limb_space( mp_marker );
+    if( bp_marker ) mpi_free_limb_space( bp_marker );
+    if( ep_marker ) mpi_free_limb_space( ep_marker );
+    if( xp_marker ) mpi_free_limb_space( xp_marker );
+    if( tspace )    mpi_free_limb_space( tspace );
+}
+
+
+void mpi_sub(struct gcry_mpi *w, struct gcry_mpi *u, struct gcry_mpi *v)
+{
+    if( w == v ) {
+        struct gcry_mpi *vv = mpi_copy(v);
+        vv->sign = !vv->sign;
+        mpi_add( w, u, vv );
+        mpi_free(vv);
+    }
+    else {
+        /* fixme: this is not thread-safe (we temporarily modify v) */
+        v->sign = !v->sign;
+        mpi_add( w, u, v );
+        v->sign = !v->sign;
+    }
+}
+
+void _gcry_mpi_fdiv_r(struct gcry_mpi *rem, struct gcry_mpi *dividend, struct gcry_mpi *divisor )
+{
+    int divisor_sign = divisor->sign;
+    struct gcry_mpi * temp_divisor = NULL;
+
+    /* We need the original value of the divisor after the remainder has been
+     * preliminary calculated.  We have to copy it to temporary space if it's
+     * the same variable as REM.  */
+    if( rem == divisor ) {
+        temp_divisor = mpi_copy( divisor );
+        divisor = temp_divisor;
+    }
+
+    _gcry_mpi_tdiv_r( rem, dividend, divisor );
+
+    if( ((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs )
+        mpi_add( rem, rem, divisor);
+
+    if( temp_divisor )
+        mpi_free(temp_divisor);
+}
+
+
+
+
+void mpi_mul(struct gcry_mpi *w, struct gcry_mpi *u,struct gcry_mpi *v)
+{           
+    mpi_size_t usize, vsize, wsize;
+    mpi_ptr_t up, vp, wp;
+    mpi_limb_t cy;
+    int usign, vsign, usecure, vsecure, sign_product;
+    int assign_wp=0;
+    mpi_ptr_t tmp_limb=NULL;
+    
+        
+    if( u->nlimbs < v->nlimbs ) { /* Swap U and V. */
+        usize = v->nlimbs;
+        usign = v->sign;
+        usecure = mpi_is_secure(v);
+        up    = v->d;
+        vsize = u->nlimbs;
+        vsign = u->sign;
+        vsecure = mpi_is_secure(u);
+        vp    = u->d;
+    }
+    else {
+        usize = u->nlimbs;
+        usign = u->sign;
+        usecure = mpi_is_secure(u);
+        up    = u->d;
+        vsize = v->nlimbs;
+        vsign = v->sign;
+        vsecure = mpi_is_secure(v);
+        vp    = v->d;
+    }
+    sign_product = usign ^ vsign;
+    wp = w->d;
+
+    /* Ensure W has space enough to store the result.  */
+    wsize = usize + vsize;
+    if ( !mpi_is_secure (w) && (mpi_is_secure (u) || mpi_is_secure (v)) ) {
+        /* w is not allocated in secure space but u or v is.  To make sure
+         * that no temporary results are stored in w, we temporarily use
+         * a newly allocated limb space for w.  */
+        wp = mpi_alloc_limb_space( wsize, 1 );
+        assign_wp = 2; /* mark it as 2 so that we can later copy it back to
+                        * normal memory */
+    }
+    else if( w->alloced < wsize ) {
+        if( wp == up || wp == vp ) {
+            wp = mpi_alloc_limb_space( wsize, mpi_is_secure(w) );
+            assign_wp = 1;
+        }
+        else {
+            mpi_resize(w, wsize );
+            wp = w->d;
+        }
+    }
+    else { /* Make U and V not overlap with W.  */
+        if( wp == up ) {
+            /* W and U are identical.  Allocate temporary space for U.  */
+            up = tmp_limb = mpi_alloc_limb_space( usize, usecure  );
+            /* Is V identical too?  Keep it identical with U.  */
+            if( wp == vp )
+                vp = up;
+            /* Copy to the temporary space.  */
+            MPN_COPY( up, wp, usize );
+        }
+        else if( wp == vp ) {
+            /* W and V are identical.  Allocate temporary space for V.  */
+            vp = tmp_limb = mpi_alloc_limb_space( vsize, vsecure );
+            /* Copy to the temporary space.  */
+            MPN_COPY( vp, wp, vsize );
+        }
+    }
+
+    if( !vsize )
+        wsize = 0;
+    else {
+        cy = _gcry_mpih_mul( wp, up, usize, vp, vsize );
+        wsize -= cy? 0:1;
+    }
+
+    if( assign_wp ) {
+        if (assign_wp == 2) {
+            /* copy the temp wp from secure memory back to normal memory */
+            mpi_ptr_t tmp_wp = mpi_alloc_limb_space (wsize, 0);
+            MPN_COPY (tmp_wp, wp, wsize);
+            mpi_free_limb_space (wp);
+            wp = tmp_wp;
+        }
+        mpi_assign_limb_space( w, wp, wsize );
+    }
+    w->nlimbs = wsize;
+    w->sign = sign_product;
+    if( tmp_limb )
+        mpi_free_limb_space( tmp_limb );
+}
+
+
+void mpi_mulm(struct gcry_mpi *w, struct gcry_mpi *u,struct gcry_mpi *v, struct gcry_mpi *m)
+{
+    mpi_mul(w, u, v);
+    _gcry_mpi_fdiv_r( w, w, m );
+}
+
diff -Nru a/security/rsa/mpi_generic.c b/security/rsa/mpi_generic.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/mpi_generic.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,227 @@
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "foo.h"
+
+
+mpi_limb_t
+_gcry_mpih_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+                                  mpi_ptr_t s2_ptr, mpi_size_t size)
+{
+    mpi_limb_t x, y, cy;
+    mpi_size_t j;
+
+    /* The loop counter and index J goes from -SIZE to -1.  This way
+       the loop becomes faster.  */
+    j = -size;
+
+    /* Offset the base pointers to compensate for the negative indices.  */
+    s1_ptr -= j;
+    s2_ptr -= j;
+    res_ptr -= j;
+
+    cy = 0;
+    do {
+        y = s2_ptr[j];
+        x = s1_ptr[j];
+        y += cy;                  /* add previous carry to subtrahend */
+        cy = y < cy;              /* get out carry from that addition */
+        y = x - y;                /* main subtract */
+        cy += y > x;              /* get out carry from the subtract, combine */
+        res_ptr[j] = y;
+    } while( ++j );
+
+    return cy;
+}
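+
+/* The carry handling relies on unsigned wrap-around: after y += cy the
+ * test y < cy detects overflow of that addition, and after y = x - y the
+ * test y > x detects a borrow.  8-bit example: x = 0x03, y = 0x05, cy = 0
+ * gives y = 0xFE after the subtract, and 0xFE > 0x03 correctly reports
+ * the borrow out of this limb.  The two tests can never fire at once, so
+ * cy stays 0 or 1.  */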
+
+
+mpi_limb_t
+_gcry_mpih_add_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+               mpi_ptr_t s2_ptr, mpi_size_t size)
+{
+    mpi_limb_t x, y, cy;
+    mpi_size_t j;
+
+    /* The loop counter and index J goes from -SIZE to -1.  This way
+       the loop becomes faster.  */
+    j = -size;
+
+    /* Offset the base pointers to compensate for the negative indices. */
+    s1_ptr -= j;
+    s2_ptr -= j;
+    res_ptr -= j;
+
+    cy = 0;
+    do {
+        y = s2_ptr[j];
+        x = s1_ptr[j];
+        y += cy;                  /* add previous carry to one addend */
+        cy = y < cy;              /* get out carry from that addition */
+        y += x;                   /* add other addend */
+        cy += y < x;              /* get out carry from that add, combine */
+        res_ptr[j] = y;
+    } while( ++j );
+
+    return cy;
+}
+
+/* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left
+ * and store the USIZE least significant digits of the result at WP.
+ * Return the bits shifted out from the most significant digit.
+ *
+ * Argument constraints:
+ * 1. 0 < CNT < BITS_PER_MP_LIMB
+ * 2. If the result is to be written over the input, WP must be >= UP.
+ */
+
+mpi_limb_t
+_gcry_mpih_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
+                                            unsigned int cnt)
+{
+    mpi_limb_t high_limb, low_limb;
+    unsigned sh_1, sh_2;
+    mpi_size_t i;
+    mpi_limb_t retval;
+
+    sh_1 = cnt;
+    wp += 1;
+    sh_2 = BITS_PER_MPI_LIMB - sh_1;
+    i = usize - 1;
+    low_limb = up[i];
+    retval = low_limb >> sh_2;
+    high_limb = low_limb;
+    while( --i >= 0 ) {
+        low_limb = up[i];
+        wp[i] = (high_limb << sh_1) | (low_limb >> sh_2);
+        high_limb = low_limb;
+    }
+    wp[i] = high_limb << sh_1;
+
+    return retval;
+}
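+
+/* 8-bit-limb example: usize = 1, up[0] = 0xAB, cnt = 4.  The function
+ * returns 0xAB >> 4 = 0x0A (the bits pushed out at the top) and stores
+ * 0xAB << 4 = 0xB0.  The constraint 0 < cnt < BITS_PER_MPI_LIMB keeps
+ * both partial shifts well-defined.  */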
+
+
+/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right
+ * and store the USIZE least significant limbs of the result at WP.
+ * The bits shifted out to the right are returned.
+ *
+ * Argument constraints:
+ * 1. 0 < CNT < BITS_PER_MP_LIMB
+ * 2. If the result is to be written over the input, WP must be <= UP.
+ */
+mpi_limb_t
+_gcry_mpih_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt)
+{
+    mpi_limb_t high_limb, low_limb;
+    unsigned sh_1, sh_2;
+    mpi_size_t i;
+    mpi_limb_t retval;
+
+    sh_1 = cnt;
+    wp -= 1;
+    sh_2 = BITS_PER_MPI_LIMB - sh_1;
+    high_limb = up[0];
+    retval = high_limb << sh_2;
+    low_limb = high_limb;
+    for( i=1; i < usize; i++) {
+        high_limb = up[i];
+        wp[i] = (low_limb >> sh_1) | (high_limb << sh_2);
+        low_limb = high_limb;
+    }
+    wp[i] = low_limb >> sh_1;
+
+    return retval;
+}
+
+mpi_limb_t
+_gcry_mpih_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+                  mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+    mpi_limb_t cy_limb;
+    mpi_size_t j;
+    mpi_limb_t prod_high, prod_low;
+    mpi_limb_t x;
+
+    /* The loop counter and index J goes from -SIZE to -1.  This way
+     * the loop becomes faster.  */
+    j = -s1_size;
+    res_ptr -= j;
+    s1_ptr -= j;
+
+    cy_limb = 0;
+    do {
+        umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb);
+
+        prod_low += cy_limb;
+        cy_limb = (prod_low < cy_limb?1:0) + prod_high;
+
+        x = res_ptr[j];
+        prod_low = x - prod_low;
+        cy_limb += prod_low > x?1:0;
+        res_ptr[j] = prod_low;
+    } while( ++j );
+
+    return cy_limb;
+}
+
+
+mpi_limb_t
+_gcry_mpih_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+                                                    mpi_limb_t s2_limb)
+{
+    mpi_limb_t cy_limb;
+    mpi_size_t j;
+    mpi_limb_t prod_high, prod_low;
+
+    /* The loop counter and index J goes from -S1_SIZE to -1.  This way
+     * the loop becomes faster.  */
+    j = -s1_size;
+
+    /* Offset the base pointers to compensate for the negative indices.  */
+    s1_ptr -= j;
+    res_ptr -= j;
+
+    cy_limb = 0;
+    do {
+        umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb );
+        prod_low += cy_limb;
+        cy_limb = (prod_low < cy_limb?1:0) + prod_high;
+        res_ptr[j] = prod_low;
+    } while( ++j );
+
+    return cy_limb;
+}
+
+
+mpi_limb_t
+_gcry_mpih_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+                  mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+    mpi_limb_t cy_limb;
+    mpi_size_t j;
+    mpi_limb_t prod_high, prod_low;
+    mpi_limb_t x;
+
+    /* The loop counter and index J goes from -SIZE to -1.  This way
+     * the loop becomes faster.  */
+    j = -s1_size;
+    res_ptr -= j;
+    s1_ptr -= j;
+
+    cy_limb = 0;
+    do {
+        umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb );
+
+        prod_low += cy_limb;
+        cy_limb = (prod_low < cy_limb?1:0) + prod_high;
+
+        x = res_ptr[j];
+        prod_low = x + prod_low;
+        cy_limb += prod_low < x?1:0;
+        res_ptr[j] = prod_low;
+    } while ( ++j );
+    return cy_limb;
+}
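+
+/* mul_1, addmul_1 and submul_1 share one pattern: umul_ppmm produces the
+ * double-limb product prod_high:prod_low of s1_ptr[j] and s2_limb, the
+ * previous carry is folded into prod_low (with prod_low < cy_limb
+ * detecting wrap), and prod_high plus that wrap becomes the next carry.
+ * addmul/submul additionally add the product to, or subtract it from,
+ * the limb already at res_ptr[j], folding a second possible carry or
+ * borrow into cy_limb the same way.  */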
+
+
diff -Nru a/security/rsa/rsa.c b/security/rsa/rsa.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/rsa.c	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,32 @@
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include "gnupg_mpi_mpi.h"
+
+extern char rsa_key_n[];
+extern char rsa_key_e[];
+extern const int rsa_key_n_size;
+extern const int rsa_key_e_size;
+
+static MPI public_key[2];
+
+int rsa_check_sig(char *sig, u8 *sha1)
+{
+	int nread;
+
+	/* initialize our keys */
+	nread = rsa_key_n_size;
+	public_key[0] = mpi_read_from_buffer(&rsa_key_n[0], &nread, 0);
+	nread = rsa_key_e_size;
+	public_key[1] = mpi_read_from_buffer(&rsa_key_e[0], &nread, 0);
+
+	/* FIXME: the digest is not yet verified against the signature
+	 * using the public key read above.  */
+	return 0;
+}
+EXPORT_SYMBOL(rsa_check_sig);
+
+
diff -Nru a/security/rsa/rsa.h b/security/rsa/rsa.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/rsa.h	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,36 @@
+/* rsa.h
+ *	Copyright (C) 1997,1998 by Werner Koch (dd9jn)
+ *	Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+#ifndef G10_RSA_H
+#define G10_RSA_H
+
+int _gcry_rsa_generate( int algo, unsigned nbits, struct gcry_mpi * *skey, struct gcry_mpi * **retfactors );
+int _gcry_rsa_check_secret_key( int algo, struct gcry_mpi * *skey );
+int _gcry_rsa_encrypt( int algo, struct gcry_mpi * *resarr, struct gcry_mpi * data, struct gcry_mpi * *pkey );
+int _gcry_rsa_decrypt( int algo, struct gcry_mpi * *result, struct gcry_mpi * *data, struct gcry_mpi * *skey );
+int _gcry_rsa_sign( int algo, struct gcry_mpi * *resarr, struct gcry_mpi * data, struct gcry_mpi * *skey );
+int _gcry_rsa_verify( int algo, struct gcry_mpi * hash, struct gcry_mpi * *data, struct gcry_mpi * *pkey,
+		    int (*cmp)(void *, struct gcry_mpi *), void *opaquev );
+unsigned _gcry_rsa_get_nbits( int algo, struct gcry_mpi * *pkey );
+const char *_gcry_rsa_get_info( int algo, int *npkey, int *nskey,
+				    int *nenc, int *nsig, int *use );
+
+
+#endif /*G10_RSA_H*/
diff -Nru a/security/rsa/rsa_key.c b/security/rsa/rsa_key.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/rsa_key.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,51 @@
+const char rsa_key_n[] = {
+0x04, 0x00, 0xb6, 0x3a, 0x9f, 0x93, 0x4c, 0x3e, 0xec, 0xb9, 0x0a, 0xd7, 0x87, 0x4b, 0x37, 0xd9, 0x2e, 0xcc, 0xfb, 0x2a, 0xad, 0x51, 0x6b, 0x67, 0x61, 0x23, 0x96, 0x5c, 0x3d, 0xdb, 0x86, 0x9b, 0x5a, 0x67, 0xfe, 0x44, 0x3d, 0x38, 0x50, 0xe5, 0x8d, 0x10, 0xe3, 0x8c, 0x1e, 0x4a, 0xfb, 0xd4, 0x37, 0x4a, 0x81, 0x36, 0xc7, 0x12, 0x0f, 0x22, 0x3d, 0x7d, 0x9e, 0x35, 0x99, 0xb1, 0xbd, 0x24, 0x97, 0x98, 0x4c, 0xb5, 0xc4, 0x5c, 0x7b, 0x44, 0xe7, 0x96, 0x4d, 0x43, 0xb3, 0x7b, 0x2d, 0xb7, 0x13, 0x7c, 0xb7, 0x9a, 0x54, 0xe8, 0xc4, 0x3e, 0xc7, 0x72, 0xba, 0x2e, 0xa3, 0xa6, 0x98, 0xc8, 0x25, 0xd6, 0x9e, 0x45, 0x1b, 0xea, 0xc3, 0x73, 0xc6, 0xad, 0x02, 0xb7, 0x6a, 0x3f, 0xb5, 0x83, 0x38, 0x49, 0x2a, 0x29, 0x1c, 0x1b, 0x52, 0x48, 0x7b, 0x49, 0x40, 0xee, 0x95, 0x3f, 0x2d, 0x78, 0x02, 0xed, };
+
+const int rsa_key_n_size = 130;
+const char rsa_key_e[] = {
+0x00, 0x06, 0x29, };
+const int rsa_key_e_size = 3;
+const char rsa_key[] =
+	"\x98\x8b\x04\x3f\x84\x97\x99\x01\x04\x00\xb6\x3a\x9f\x93\x4c\x3e"
+	"\xec\xb9\x0a\xd7\x87\x4b\x37\xd9\x2e\xcc\xfb\x2a\xad\x51\x6b\x67"
+	"\x61\x23\x96\x5c\x3d\xdb\x86\x9b\x5a\x67\xfe\x44\x3d\x38\x50\xe5"
+	"\x8d\x10\xe3\x8c\x1e\x4a\xfb\xd4\x37\x4a\x81\x36\xc7\x12\x0f\x22"
+	"\x3d\x7d\x9e\x35\x99\xb1\xbd\x24\x97\x98\x4c\xb5\xc4\x5c\x7b\x44"
+	"\xe7\x96\x4d\x43\xb3\x7b\x2d\xb7\x13\x7c\xb7\x9a\x54\xe8\xc4\x3e"
+	"\xc7\x72\xba\x2e\xa3\xa6\x98\xc8\x25\xd6\x9e\x45\x1b\xea\xc3\x73"
+	"\xc6\xad\x02\xb7\x6a\x3f\xb5\x83\x38\x49\x2a\x29\x1c\x1b\x52\x48"
+	"\x7b\x49\x40\xee\x95\x3f\x2d\x78\x02\xed\x00\x06\x29\xb4\x07\x74"
+	"\x65\x73\x74\x6b\x65\x79\x88\xb2\x04\x13\x01\x02\x00\x1c\x05\x02"
+	"\x3f\x84\x97\x99\x02\x1b\x03\x04\x0b\x07\x03\x02\x03\x15\x02\x03"
+	"\x03\x16\x02\x01\x02\x1e\x01\x02\x17\x80\x00\x0a\x09\x10\x82\xbd"
+	"\x9b\x6d\xe6\x83\xcc\x5a\x77\x23\x03\xfe\x2e\x67\x4d\x03\x87\x36"
+	"\x05\x56\xdc\x31\x5f\xa7\x0a\x93\x3b\x74\x1e\x87\xfd\x95\x98\x1f"
+	"\x75\x1d\x20\x77\x1a\x07\xa8\x85\x95\x94\x6c\x9e\x5b\xab\x96\xeb"
+	"\x4d\x46\xbe\xea\xcb\xff\xd8\xac\xa7\xdd\x32\x0e\xb5\xfb\x1d\x10"
+	"\xf7\xb6\xf7\x54\x4c\x4e\xfc\x12\x28\x47\x1c\xd9\x1d\x06\x66\xdb"
+	"\x94\xe3\xde\x62\xfd\xe7\xeb\x2d\xc5\xfb\x11\xa1\x3d\xf1\x4e\xf4"
+	"\x46\x42\x8f\x8b\x86\x78\x08\xb7\x9e\xef\xbf\x88\x72\x7e\x73\x73"
+	"\x30\x92\xa6\xa1\x59\x30\x5e\x94\x51\x39\x92\xab\xd5\xce\xf8\x67"
+	"\x8d\x9c\x37\x85\xdc\x6e\x25\x3c\xc1\x81\xb8\x8b\x04\x3f\x84\x97"
+	"\xb8\x01\x04\x00\xe7\x16\x95\xed\x53\xcc\x3d\x03\xc3\x49\xa9\xf2"
+	"\xa6\x4e\x6c\x74\x5f\xc3\xe1\x53\x14\x4e\x29\xa2\x54\x36\xd6\xc9"
+	"\x47\x66\x40\xb9\xd9\x71\xae\x53\x15\xab\x97\x37\x60\x68\x58\x1c"
+	"\x7b\xe9\xc8\xe1\xf1\x81\xa4\xb0\x06\xd8\x71\x9e\x75\x82\x5e\xe6"
+	"\x1b\x7e\xbd\xb6\xa1\x4f\x4c\x9d\xee\xaf\x83\x32\x8a\xa9\x87\xf6"
+	"\x40\x99\x2c\xe5\x80\x42\x10\x29\xe6\xaf\x72\xcc\xd5\x2b\xf1\x83"
+	"\xe9\xa7\xee\xd1\x07\xc7\xd6\x05\xdd\xdd\x37\x84\x06\xf9\x18\x50"
+	"\x0c\xd1\x83\x76\xe0\xe4\x7d\x31\xdf\x8e\x4a\x2b\xed\x56\x4e\x82"
+	"\xb3\x0f\xf1\x0b\x00\x06\x29\x88\x9f\x04\x18\x01\x02\x00\x09\x05"
+	"\x02\x3f\x84\x97\xb8\x02\x1b\x0c\x00\x0a\x09\x10\x82\xbd\x9b\x6d"
+	"\xe6\x83\xcc\x5a\x61\xcb\x03\xff\x54\x94\xa1\x6f\x90\x56\x32\x12"
+	"\xcf\x2a\xd9\xf6\x3f\xec\xbe\xcc\x47\x6c\xac\x09\x42\xef\x14\xec"
+	"\xe6\xf7\xed\x8c\xf6\x98\x20\xd2\xe1\xb9\x47\xc1\x47\xd4\xcb\xd0"
+	"\x1b\xb1\x6b\x38\xf1\x35\xc1\xfd\x36\xc8\x3f\xfc\xbc\x81\xc6\xa5"
+	"\x14\xcc\x41\x2b\x7c\x69\x78\x9a\x7e\x08\x8c\x3b\x5d\x2b\x86\x50"
+	"\xea\x4c\x7b\x4c\x34\x01\x87\xae\xe6\x04\x77\x93\x7d\xe5\x54\xcc"
+	"\x70\x30\x56\x11\x1b\xaf\x8c\xcf\x4b\x99\x2c\x5a\xa8\x70\x32\xfc"
+	"\xd9\xef\x6f\x3e\x76\x4c\xd5\x97\xdf\x1c\x49\x85\xcf\x24\x01\xed"
+	"\x3c\x8a\x56\x03\x8f\xad\x50\x35"
+	;
+
+const int rsa_key_size = 632;
diff -Nru a/security/rsa/userspace/Makefile b/security/rsa/userspace/Makefile
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/userspace/Makefile	Thu Jan 15 09:29:19 2004
@@ -0,0 +1,30 @@
+# Set the following to `true' to make a debuggable build.
+# Leave this set to `false' for production use.
+DEBUG = true
+
+
+ROOT =		mod
+VERSION =	0.1
+INSTALL_DIR =	/usr/local/bin
+RELEASE_NAME =	$(ROOT)-$(VERSION)
+
+CC = gcc
+
+INCLUDES =
+ifeq ($(DEBUG),true)
+CFLAGS = -g -O -Wall
+else
+CFLAGS = -O2 -Wall
+endif
+
+OBJS =	mod.o
+
+all: extract_pkey $(ROOT)
+
+$(ROOT): $(OBJS)
+	$(CC) $(LDFLAGS) -o $(ROOT) $(OBJS) -lbfd -liberty
+
+.c.o:
+	$(CC) $(INCLUDES) $(CFLAGS) -c $< -o $@
+
+extract_pkey: extract_pkey.c
+	$(CC) $(CFLAGS) $< -o $@
+
+clean:
+	-rm $(OBJS) $(ROOT) extract_pkey
diff -Nru a/security/rsa/userspace/extract_pkey.c b/security/rsa/userspace/extract_pkey.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/userspace/extract_pkey.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,139 @@
+/*
+ * Distributed Security Module (DSM)
+ *
+ * Simple extractor of MPIs for e and n in gpg exported keys (or pubrings
+ * with only one key apparently)
+ * Exported keys come from gpg --export.
+ * Output is meant to be copy pasted into kernel code until better mechanism.
+ *	
+ * Copyright (C) 2002-2003 Ericsson, Inc
+ *
+ *      This program is free software; you can redistribute it and/or modify
+ *      it under the terms of the GNU General Public License as published by
+ *      the Free Software Foundation; either version 2 of the License, or
+ *      (at your option) any later version.
+ * 
+ * Author: David Gordon Aug 2003 
+ * Modifs: Vincent Roy Sept 2003 
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define DSI_ELF_SIG_SIZE 512               /* this is a redefinition */
+#define DSI_PKEY_N_OFFSET        8         /* offset for pkey->n */
+#define DSI_MPI_MAX_SIZE_N 1024 
+/* pkey->e MPI follows pkey->n MPI */
+
+
+void usage();
+
+
+int main(int argc, char **argv)
+{
+	int pkey_file, module_file, i;
+	unsigned int len;
+	unsigned char c;
+	unsigned char *key;
+	unsigned char *temp;
+	int key_offset = 0;
+	int key_size = DSI_MPI_MAX_SIZE_N + 1;
+
+	if (argc <= 1) {
+		usage();
+		return -1;
+	}
+
+	/* Get pkey MPIs, one for 'n' and the other for 'e' */
+	pkey_file = open(argv[1], O_RDONLY);
+	if (pkey_file < 0) {
+		printf ("Unable to open pkey_file %s %s\n", argv[1], strerror(errno));
+		return -1;
+	}
+//	module_file = open ("/dev/Digsig", O_WRONLY);
+//	if (!module_file) {
+//		printf ("Unable to open module char device %s\n", strerror(errno));
+//		return -1;
+//	}
+  
+	key = (unsigned char *)malloc (key_size);
+	if (key == NULL) {
+		printf ("Out of memory\n");
+		return -1;
+	}
+//	key[key_offset++] = 'n';
+	/*
+	 * Format of an MPI:
+	 * - 2 bytes: length of MPI in BITS
+	 * - MPI
+	 */
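+	/*
+	 * Worked example: rsa_key_n in ../rsa_key.c begins 0x04 0x00,
+	 * i.e. 0x0400 = 1024 bits -> (1024 + 7) / 8 = 128 bytes of
+	 * modulus, so with the two length bytes rsa_key_n_size is 130.
+	 */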
+
+	lseek(pkey_file, DSI_PKEY_N_OFFSET, SEEK_SET);
+	read(pkey_file, &c, 1);
+	key[key_offset++] = c;
+	len = c << 8;
+
+	read(pkey_file, &c, 1);
+	key[key_offset++] = c;
+	len |= c;
+	len = (len + 7) / 8;   /* round up */
+
+	if (len > DSI_ELF_SIG_SIZE) {
+		printf("\nLength of 'n' MPI is too large %#x\n", len);
+		return -1;
+	}
+
+	for (i = 0; i < len; i++) {
+		read(pkey_file, &c, 1);
+		if (key_offset == key_size) {
+			/* grow before writing one byte past the end */
+			key_size *= 2;
+			temp = (unsigned char *)malloc (key_size);
+			memcpy (temp, key, key_offset);
+			free (key);
+			key = temp;
+		}
+		key[key_offset++] = c;
+	}
+	printf("const char rsa_key_n[] = {\n");
+	for (i = 0; i < key_offset; i++) {
+		printf("0x%02x,%s", key[i] & 0xff, (i % 16 == 15) ? "\n" : " ");
+	}
+	printf("};\n\n");
+	printf("const int rsa_key_n_size = %d;\n", key_offset);
+//	write (module_file, key, key_offset);
+
+	key_offset = 0;
+//	key[key_offset++] = 'e';
+	read(pkey_file, &c, 1);
+	key[key_offset++] = c;
+	len = c << 8;
+
+	read(pkey_file, &c, 1);
+	key[key_offset++] = c;
+	len |= c;
+	len = (len + 7) / 8;   /* round up */
+
+	if (len > DSI_ELF_SIG_SIZE) {
+		printf("\nLength of 'e' MPI is too large %#x\n", len);
+		return -1;
+	}
+
+	for (i = 0; i < len; i++) {
+		read(pkey_file, &c, 1);
+		key[key_offset++] = c;
+	}
+
+	printf("const char rsa_key_e[] = {\n");
+	for (i = 0; i < key_offset; i++) {
+		printf("0x%02x,%s", key[i] & 0xff, (i % 16 == 15) ? "\n" : " ");
+	}
+	printf("};\n");
+	printf("const int rsa_key_e_size = %d;\n", key_offset);
+//	write (module_file, key, key_offset);
+
+	return 0;
+}
+
+
+void usage() 
+{
+	printf("Usage: extract_pkey gpgkey\nYou can export a key with gpg --export\n");
+}
+
+
diff -Nru a/security/rsa/userspace/mod.c b/security/rsa/userspace/mod.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/userspace/mod.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,101 @@
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <bfd.h>
+
+/* This should be set in a Makefile somehow */
+#define TARGET "i686-pc-linux-gnu"
+
+static FILE *out;
+
+static void dump_data(bfd *abfd)
+{
+	asection *section;
+	bfd_byte *data = 0;
+	bfd_size_type addr_offset;
+	bfd_size_type stop_offset;
+	unsigned int opb = bfd_octets_per_byte(abfd);
+
+	for (section = abfd->sections; section != NULL; section = section->next) {
+		if (section->flags & SEC_HAS_CONTENTS) {
+			if (bfd_section_size(abfd, section) == 0)
+				continue;
+
+			/* We only care about sections with "text" or "data" in their names */
+			if ((strstr(section->name, "text") == NULL) &&
+			    (strstr(section->name, "data") == NULL))
+				continue;
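+
+			/*
+			 * The kernel side applies the same text/data name
+			 * filter when it rebuilds the digest, but it also
+			 * skips ".rel.*" sections.  BFD normally presents
+			 * ELF reloc sections as relocations rather than
+			 * sections, so none should appear here, but any
+			 * divergence in section selection between the two
+			 * sides makes every signature check fail.
+			 */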
+
+			printf ("Contents of section %s size %d:\n", section->name, (int)bfd_section_size(abfd, section));
+
+			data = (bfd_byte *) malloc((size_t) bfd_section_size (abfd, section));
+			if (data == NULL ||
+			    !bfd_get_section_contents(abfd, section, (PTR) data, 0, bfd_section_size (abfd, section))) {
+				fprintf(stderr, "error reading section %s\n", section->name);
+				exit(1);
+			}
+
+			stop_offset = bfd_section_size(abfd, section) / opb;
+
+			for (addr_offset = 0; addr_offset < stop_offset; ++addr_offset) {
+				fprintf (out, "%c", (data[addr_offset]));
+//				fprintf (out, "%02x", (unsigned)(data[addr_offset]));
+			}
+			free (data);
+//			printf('\n');
+		}
+	}
+}
+
+void set_default_bfd_target(void)
+{
+	const char *target = TARGET;
+
+	if (!bfd_set_default_target(target))
+		fprintf(stderr, "can't set BFD default target to `%s': %s", target, bfd_errmsg (bfd_get_error ()));
+}
+
+int main (int argc, char *argv[])
+{
+	char *in_file;
+	char *out_file;
+	bfd *file;
+	char **matching;
+
+	if (argc != 3) {
+		fprintf(stderr, "%s [infile] [outfile]\n", argv[0]);
+		exit(1);
+	}
+
+	in_file = argv[1];
+	out_file = argv[2];
+
+	bfd_init();
+	set_default_bfd_target();
+
+
+//	file = bfd_openr(in_file, "elf32-i386");
+	file = bfd_openr(in_file, NULL);
+	if (file == NULL) {
+		fprintf(stderr, "error \"%s\" trying to open %s\n", strerror(errno), in_file);
+		exit(1);
+	}
+
+	out = fopen(out_file, "w");
+	if (out == NULL) {
+		fprintf(stderr, "error \"%s\" trying to create %s\n", strerror(errno), out_file);
+		exit(1);
+	}
+
+	if (bfd_check_format_matches(file, bfd_object, &matching)) {
+		dump_data (file);
+	} else {
+		fprintf(stderr, "%s is not an object file: %s\n", in_file, bfd_errmsg(bfd_get_error()));
+	}
+
+	bfd_close(file);
+	fclose(out);
+
+	return 0;
+}
+
diff -Nru a/security/rsa/userspace/mod_elf.c b/security/rsa/userspace/mod_elf.c
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/userspace/mod_elf.c	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,139 @@
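+/*
+ * Experimental libelf-based section walker -- a scratchpad alternative
+ * to the BFD-based mod.c.  It prints the section headers, and the first
+ * bytes of each section, of the file named on the command line
+ * (defaulting to "foo").
+ */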
+#include <argp.h>
+#include <assert.h>
+#include <dwarf.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <gelf.h>
+#include <inttypes.h>
+#include <langinfo.h>
+//#include <libdwarf.h>
+#include <libebl.h>
+//#include <libintl.h>
+#include <locale.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/param.h>
+
+
+size_t section_num;
+
+/* Print the section headers.  */
+static void print_shdr (Elf *elf, GElf_Ehdr *ehdr)
+{
+	size_t cnt;
+	size_t shstrndx;
+
+	/* Get the section header string table index.  */
+	if (elf_getshstrndx(elf, &shstrndx) < 0) {
+		fprintf(stderr, "cannot get section header string table index");
+		return;
+	}
+
+	for (cnt = 0; cnt < section_num; ++cnt) {
+		char flagbuf[20] = "";	/* section flags are not decoded here */
+		Elf_Scn *scn = elf_getscn(elf, cnt);
+		GElf_Shdr shdr_mem;
+		GElf_Shdr *shdr;
+		Elf_Data *data;
+		char *name;
+		size_t i;
+
+		if (scn == NULL)
+			error (EXIT_FAILURE, 0, "cannot get section: %s", elf_errmsg (-1));
+
+		/* Get the section header.  */
+		shdr = gelf_getshdr (scn, &shdr_mem);
+		if (shdr == NULL)
+			error (EXIT_FAILURE, 0, "cannot get section header: %s", elf_errmsg (-1));
+
+		name = elf_strptr(elf, shstrndx, shdr->sh_name);
+		printf("%s - ", name ? name : "<corrupt>");
+		/* dump the first bytes of the section contents as a debug aid */
+		data = elf_getdata(scn, NULL);
+		if (data != NULL && data->d_buf != NULL) {
+			for (i = 0; i < data->d_size && i < 200; ++i)
+				printf("%c", ((char *)data->d_buf)[i]);
+		}
+		printf("\n");
+
+		//if (shdr->sh_flags & SHF_ALLOC)
+		printf("[%2zu] %-20s %-12s %0*" PRIx64 " %0*" PRIx64 " %0*" PRIx64
+		       " %2" PRId64 " %-5s %2" PRId32 " %3" PRId32
+		       " %2" PRId64 "\n",
+		       cnt,
+		       elf_strptr(elf, shstrndx, shdr->sh_name)
+		       ?: "<corrupt>",
+		       //ebl_section_type_name (ebl, shdr->sh_type, buf, sizeof (buf)),
+		       "foobar",
+		       ehdr->e_ident[EI_CLASS] == ELFCLASS32 ? 8 : 16, shdr->sh_addr,
+		       ehdr->e_ident[EI_CLASS] == ELFCLASS32 ? 6 : 8, shdr->sh_offset,
+		       ehdr->e_ident[EI_CLASS] == ELFCLASS32 ? 6 : 8, shdr->sh_size,
+		       shdr->sh_entsize, flagbuf, shdr->sh_link, shdr->sh_info,
+		       shdr->sh_addralign);
+	}
+
+	fputc_unlocked('\n', stdout);
+}
+
+
+
+
+
+int main (int argc, char *argv[])
+{
+	char *filename = argc > 1 ? argv[1] : "foo";	/* fall back to "foo" */
+	int fd;
+	Elf *elf;
+
+	elf_version(EV_CURRENT);
+
+	fd = open(filename, O_RDONLY);
+	if (fd < 0) {
+		fprintf(stderr, "error \"%s\" trying to open %s\n", strerror(errno), filename);
+		exit(1);
+	}
+
+	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+	if (elf == NULL) {
+		fprintf(stderr, "Can't get elf descriptor for %s\n", filename);
+		goto exit;
+	}
+
+	Elf_Kind kind = elf_kind(elf);
+	switch (kind) {
+	case ELF_K_ELF:
+		printf("ELF_K_ELF\n");
+		break;
+	default:
+		fprintf(stderr, "Not a proper elf file for us to process.\n");
+		goto end;
+	}
+
+	GElf_Ehdr ehdr_mem;
+	GElf_Ehdr *ehdr = gelf_getehdr(elf, &ehdr_mem);
+	if (ehdr == NULL) {
+		fprintf(stderr, "Can not read elf header.\n");
+		goto end;
+	}
+
+	if (elf_getshnum(elf, &section_num) < 0) {
+		fprintf(stderr, "Can not determine number of sections.\n");
+		goto end;
+	}
+
+	printf("section_num = %d\n", section_num);
+
+	print_shdr(elf, ehdr);
+
+
+end:	
+
+	elf_end(elf);
+exit:
+	close(fd);
+
+	return 0;
+}
+
diff -Nru a/security/rsa/userspace/sign b/security/rsa/userspace/sign
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/security/rsa/userspace/sign	Thu Jan 15 09:29:18 2004
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+
+if [ $# = "0" ] ; then
+	echo
+#	echo "usage: $0 <module_to_sign> <key_name>"
+	echo "usage: $0 <module_to_sign>"
+	echo
+	exit 1
+fi
+
+module=$1
+#key=$2
+
+# strip out only the sections that we care about
+./mod $module $module.out
+
+# sign the sections
+gpg --no-greeting -sb $module.out
+## sha1 the sections
+#sha1sum $module.out | awk "{print \$1}" > $module.sha1
+#
+## encrypt the sections
+#gpg --no-greeting -e -o - -r $key $module.sha1 > $module.crypt
+
+# add the encrypted data to the module
+#objcopy --add-section module_sig=$module.crypt $module $module.sign
+objcopy --add-section module_sig=$module.out.sig $module $module.sign
+
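+# A quick way to eyeball the attached signature afterwards (objdump is
+# part of binutils, like objcopy above):
+#
+#	objdump -s -j module_sig $module.sign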