 arch/alpha/kernel/smp.c                       |    8 -
 arch/i386/Kconfig                             |    4 
 arch/i386/kernel/apic.c                       |    2 
 arch/i386/kernel/cpu/proc.c                   |    2 
 arch/i386/kernel/cpuid.c                      |    2 
 arch/i386/kernel/io_apic.c                    |   76 +++++++------
 arch/i386/kernel/irq.c                        |   57 ++++++---
 arch/i386/kernel/kgdb_stub.c                  |   10 -
 arch/i386/kernel/ldt.c                        |    4 
 arch/i386/kernel/mpparse.c                    |    6 -
 arch/i386/kernel/msr.c                        |    2 
 arch/i386/kernel/reboot.c                     |    2 
 arch/i386/kernel/smp.c                        |   85 ++++++++------
 arch/i386/kernel/smpboot.c                    |   80 +++++++------
 arch/i386/mach-generic/bigsmp.c               |    3 
 arch/i386/mach-generic/default.c              |    3 
 arch/i386/mach-generic/probe.c                |    3 
 arch/i386/mach-generic/summit.c               |    3 
 arch/i386/mach-visws/mpparse.c                |    6 -
 arch/i386/mach-voyager/voyager_basic.c        |    0 
 arch/i386/mach-voyager/voyager_smp.c          |  124 ++++++++++-----------
 arch/ia64/kernel/iosapic.c                    |    8 -
 arch/ia64/kernel/irq.c                        |   70 ++++++++----
 arch/ia64/kernel/perfmon.c                    |   12 --
 arch/ia64/kernel/setup.c                      |    2 
 arch/ia64/kernel/smp.c                        |    2 
 arch/ia64/kernel/smpboot.c                    |   40 +++---
 arch/ia64/kernel/time.c                       |    4 
 arch/mips/kernel/irq.c                        |   38 +++++-
 arch/mips/kernel/proc.c                       |    2 
 arch/mips/kernel/smp.c                        |    4 
 arch/mips/sgi-ip27/ip27-init.c                |    2 
 arch/mips/sibyte/cfe/smp.c                    |    2 
 arch/mips/sibyte/sb1250/smp.c                 |    4 
 arch/mips64/kernel/irq.c                      |   53 ++++++---
 arch/mips64/kernel/proc.c                     |    2 
 arch/mips64/kernel/smp.c                      |    4 
 arch/parisc/kernel/smp.c                      |   19 +--
 arch/ppc/kernel/irq.c                         |   58 +++++++---
 arch/ppc/kernel/setup.c                       |    2 
 arch/ppc/kernel/smp.c                         |    8 -
 arch/ppc64/Kconfig                            |    2 
 arch/ppc64/kernel/htab.c                      |    4 
 arch/ppc64/kernel/irq.c                       |   41 +++++--
 arch/ppc64/kernel/open_pic.c                  |   15 +-
 arch/ppc64/kernel/open_pic.h                  |    1 
 arch/ppc64/kernel/pacaData.c                  |   66 +++++++++++
 arch/ppc64/kernel/prom.c                      |    2 
 arch/ppc64/kernel/rtasd.c                     |    2 
 arch/ppc64/kernel/setup.c                     |    2 
 arch/ppc64/kernel/smp.c                       |    6 -
 arch/ppc64/kernel/xics.c                      |   13 +-
 arch/ppc64/mm/init.c                          |   14 +-
 arch/s390/kernel/setup.c                      |    2 
 arch/s390/kernel/smp.c                        |   16 +-
 arch/sparc64/kernel/irq.c                     |    8 -
 arch/sparc64/kernel/smp.c                     |   53 ++++-----
 arch/um/kernel/irq.c                          |   41 ++++---
 arch/um/kernel/skas/process_kern.c            |    2 
 arch/um/kernel/smp.c                          |   32 ++---
 arch/um/kernel/tt/process_kern.c              |    2 
 arch/um/kernel/um_arch.c                      |    2 
 arch/x86_64/kernel/apic.c                     |    6 -
 arch/x86_64/kernel/io_apic.c                  |   10 -
 arch/x86_64/kernel/irq.c                      |   51 ++++++--
 arch/x86_64/kernel/mpparse.c                  |    4 
 arch/x86_64/kernel/msr.c                      |    2 
 arch/x86_64/kernel/reboot.c                   |    2 
 arch/x86_64/kernel/setup.c                    |    2 
 arch/x86_64/kernel/smp.c                      |   19 +--
 arch/x86_64/kernel/smpboot.c                  |   66 +++++------
 drivers/base/node.c                           |   13 ++
 drivers/s390/char/sclp.c                      |    6 -
 include/asm-alpha/smp.h                       |   24 ----
 include/asm-generic/cpumask_arith.h           |   61 ++++++++++
 include/asm-generic/cpumask_array.h           |   41 +++++++
 include/asm-generic/cpumask_const_reference.h |   29 +++++
 include/asm-generic/cpumask_const_value.h     |   21 +++
 include/asm-generic/cpumask_up.h              |   60 ++++++++++
 include/asm-i386/atomic.h                     |    2 
 include/asm-i386/bitops.h                     |   12 +-
 include/asm-i386/genapic.h                    |   24 ++--
 include/asm-i386/highmem.h                    |    5 
 include/asm-i386/hw_irq.h                     |   44 +++----
 include/asm-i386/mach-bigsmp/mach_apic.h      |   31 ++---
 include/asm-i386/mach-bigsmp/mach_ipi.h       |    9 -
 include/asm-i386/mach-default/mach_apic.h     |   35 +++---
 include/asm-i386/mach-default/mach_ipi.h      |    4 
 include/asm-i386/mach-es7000/mach_apic.h      |   41 +++----
 include/asm-i386/mach-es7000/mach_ipi.h       |   11 +
 include/asm-i386/mach-numaq/mach_apic.h       |   35 +++---
 include/asm-i386/mach-numaq/mach_ipi.h        |    9 -
 include/asm-i386/mach-summit/mach_apic.h      |   27 ++--
 include/asm-i386/mach-summit/mach_ipi.h       |    9 -
 include/asm-i386/mach-visws/mach_apic.h       |   19 +--
 include/asm-i386/mmu_context.h                |    6 -
 include/asm-i386/mpspec.h                     |    3 
 include/asm-i386/numaq.h                      |    4 
 include/asm-i386/smp.h                        |   29 -----
 include/asm-i386/topology.h                   |   13 +-
 include/asm-ia64/bitops.h                     |    2 
 include/asm-ia64/smp.h                        |   22 ---
 include/asm-mips/smp.h                        |   48 --------
 include/asm-mips64/smp.h                      |   48 --------
 include/asm-parisc/smp.h                      |   19 ---
 include/asm-ppc/smp.h                         |   22 ---
 include/asm-ppc64/mmu_context.h               |    2 
 include/asm-ppc64/smp.h                       |   19 ---
 include/asm-ppc64/tlb.h                       |    6 -
 include/asm-s390/smp.h                        |   26 ----
 include/asm-sparc/smp.h                       |    1 
 include/asm-sparc64/smp.h                     |   14 --
 include/asm-um/smp.h                          |   12 --
 include/asm-x86_64/mpspec.h                   |    2 
 include/asm-x86_64/smp.h                      |   36 +-----
 include/asm-x86_64/topology.h                 |    4 
 include/linux/bitmap.h                        |  149 ++++++++++++++++++++++++++
 include/linux/cpumask.h                       |   62 ++++++++++
 include/linux/init_task.h                     |    2 
 include/linux/irq.h                           |    3 
 include/linux/node.h                          |    3 
 include/linux/rcupdate.h                      |    5 
 include/linux/sched.h                         |    9 -
 include/linux/smp.h                           |    3 
 include/linux/topology.h                      |    8 +
 kernel/fork.c                                 |    2 
 kernel/module.c                               |    6 -
 kernel/rcupdate.c                             |   12 +-
 kernel/sched.c                                |   36 +++---
 kernel/softirq.c                              |    5 
 kernel/workqueue.c                            |    2 
 mm/vmscan.c                                   |    4 
 132 files changed, 1539 insertions(+), 988 deletions(-)
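
The diffstat above is dominated by one mechanical substitution: every
"unsigned long" CPU bitmap becomes an opaque cpumask_t, manipulated only
through accessors (cpu_set/cpu_clear/cpu_isset, cpus_and/cpus_or,
cpus_empty, cpumask_of_cpu, first_cpu, ...), so NR_CPUS is no longer
capped at BITS_PER_LONG.  The real definitions live in the new
include/linux/cpumask.h and include/asm-generic/cpumask_*.h headers; the
standalone sketch below only models the idea (the kernel versions are
macros operating on lvalues, not functions taking pointers):

#include <stdio.h>
#include <string.h>

#define NR_CPUS		96	/* deliberately wider than one long */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long bits[BITS_TO_LONGS(NR_CPUS)]; } cpumask_t;

static void cpu_set(int cpu, cpumask_t *m)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpu_isset(int cpu, const cpumask_t *m)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

static void cpus_and(cpumask_t *dst, cpumask_t a, cpumask_t b)
{
	size_t k;

	for (k = 0; k < BITS_TO_LONGS(NR_CPUS); k++)
		dst->bits[k] = a.bits[k] & b.bits[k];
}

static int cpus_empty(cpumask_t m)
{
	size_t k;

	for (k = 0; k < BITS_TO_LONGS(NR_CPUS); k++)
		if (m.bits[k])
			return 0;
	return 1;
}

int main(void)
{
	cpumask_t online, affinity, allowed;

	memset(&online, 0, sizeof(online));
	memset(&affinity, 0, sizeof(affinity));
	cpu_set(1, &online);
	cpu_set(70, &online);		/* a CPU beyond bit 63/31 */
	cpu_set(70, &affinity);

	/* was: allowed = online & affinity; if (!allowed) ... */
	cpus_and(&allowed, online, affinity);
	printf("allowed empty: %d, cpu70 set: %d\n",
	       cpus_empty(allowed), cpu_isset(70, &allowed));
	return 0;
}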

diff -puN arch/alpha/kernel/smp.c~cpumask_t-1 arch/alpha/kernel/smp.c
--- 25/arch/alpha/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/alpha/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -71,7 +71,7 @@ static int smp_secondary_alive __initdat
 
 /* Which cpus ids came online.  */
 unsigned long cpu_present_mask;
-volatile unsigned long cpu_online_map;
+cpumask_t cpu_online_map;
 
 /* cpus reported in the hwrpb */
 static unsigned long hwrpb_cpu_present_mask __initdata = 0;
@@ -132,7 +132,7 @@ smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
 
-	if (test_and_set_bit(cpuid, &cpu_online_map)) {
+	if (cpu_test_and_set(cpuid, cpu_online_map)) {
 		printk("??, cpu 0x%x already present??\n", cpuid);
 		BUG();
 	}
@@ -575,8 +575,8 @@ smp_prepare_boot_cpu(void)
 	/*
 	 * Mark the boot cpu (current cpu) as both present and online
 	 */ 
-	set_bit(smp_processor_id(), &cpu_present_mask);
-	set_bit(smp_processor_id(), &cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_present_mask);
+	cpu_set(smp_processor_id(), cpu_online_map);
 }
 
 int __devinit
diff -puN arch/i386/Kconfig~cpumask_t-1 arch/i386/Kconfig
--- 25/arch/i386/Kconfig~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/Kconfig	2003-07-03 01:19:48.000000000 -0700
@@ -438,9 +438,9 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
+	int "Maximum number of CPUs (2-255)"
 	depends on SMP
-	default "32"
+	default "8"
 	help
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support.  The maximum supported value is 32 and the
diff -puN arch/i386/kernel/apic.c~cpumask_t-1 arch/i386/kernel/apic.c
--- 25/arch/i386/kernel/apic.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/apic.c	2003-07-03 01:19:48.000000000 -0700
@@ -1137,7 +1137,7 @@ int __init APIC_init_uniprocessor (void)
 
 	connect_bsp_APIC();
 
-	phys_cpu_present_map = 1 << boot_cpu_physical_apicid;
+	phys_cpu_present_map = cpumask_of_cpu(boot_cpu_physical_apicid);
 
 	setup_local_APIC();
 
diff -puN arch/i386/kernel/cpuid.c~cpumask_t-1 arch/i386/kernel/cpuid.c
--- 25/arch/i386/kernel/cpuid.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/cpuid.c	2003-07-03 01:19:48.000000000 -0700
@@ -136,7 +136,7 @@ static int cpuid_open(struct inode *inod
   int cpu = minor(file->f_dentry->d_inode->i_rdev);
   struct cpuinfo_x86 *c = &(cpu_data)[cpu];
 
-  if ( !(cpu_online_map & (1UL << cpu)) )
+  if (!cpu_online(cpu))
     return -ENXIO;		/* No such CPU */
   if ( c->cpuid_level < 0 )
     return -EIO;		/* CPUID not supported */
diff -puN arch/i386/kernel/cpu/proc.c~cpumask_t-1 arch/i386/kernel/cpu/proc.c
--- 25/arch/i386/kernel/cpu/proc.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/cpu/proc.c	2003-07-03 01:19:48.000000000 -0700
@@ -60,7 +60,7 @@ static int show_cpuinfo(struct seq_file 
 	int fpu_exception;
 
 #ifdef CONFIG_SMP
-	if (!(cpu_online_map & (1<<n)))
+	if (!cpu_online(n))
 		return 0;
 #endif
 	seq_printf(m, "processor\t: %d\n"
diff -puN arch/i386/kernel/io_apic.c~cpumask_t-1 arch/i386/kernel/io_apic.c
--- 25/arch/i386/kernel/io_apic.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/io_apic.c	2003-07-03 01:19:48.000000000 -0700
@@ -249,14 +249,14 @@ static void clear_IO_APIC (void)
 			clear_IO_APIC_pin(apic, pin);
 }
 
-static void set_ioapic_affinity (unsigned int irq, unsigned long cpu_mask)
+static void set_ioapic_affinity(unsigned int irq, cpumask_t cpumask)
 {
 	unsigned long flags;
 	int pin;
 	struct irq_pin_list *entry = irq_2_pin + irq;
 	unsigned int apicid_value;
 	
-	apicid_value = cpu_mask_to_apicid(cpu_mask);
+	apicid_value = cpu_mask_to_apicid(mk_cpumask_const(cpumask));
 	/* Prepare to do the io_apic_write */
 	apicid_value = apicid_value << 24;
 	spin_lock_irqsave(&ioapic_lock, flags);
@@ -286,9 +286,9 @@ static void set_ioapic_affinity (unsigne
 #  define Dprintk(x...) 
 # endif
 
-extern unsigned long irq_affinity[NR_IRQS];
+extern cpumask_t irq_affinity[NR_IRQS];
 
-static int __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
+static cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
 
 #define IRQBALANCE_CHECK_ARCH -999
 static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
@@ -307,8 +307,7 @@ struct irq_cpu_info {
 #define IDLE_ENOUGH(cpu,now) \
 		(idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1))
 
-#define IRQ_ALLOWED(cpu,allowed_mask) \
-		((1 << cpu) & (allowed_mask))
+#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
 
 #define CPU_TO_PACKAGEINDEX(i) \
 		((physical_balance && i > cpu_sibling_map[i]) ? cpu_sibling_map[i] : i)
@@ -320,7 +319,7 @@ struct irq_cpu_info {
 
 long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
 
-static unsigned long move(int curr_cpu, unsigned long allowed_mask,
+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
 			unsigned long now, int direction)
 {
 	int search_idle = 1;
@@ -350,20 +349,20 @@ inside:
 static inline void balance_irq(int cpu, int irq)
 {
 	unsigned long now = jiffies;
-	unsigned long allowed_mask;
+	cpumask_t allowed_mask;
 	unsigned int new_cpu;
 		
 	if (irqbalance_disabled)
 		return; 
 
-	allowed_mask = cpu_online_map & irq_affinity[irq];
+	cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
 	new_cpu = move(cpu, allowed_mask, now, 1);
 	if (cpu != new_cpu) {
 		irq_desc_t *desc = irq_desc + irq;
 		unsigned long flags;
 
 		spin_lock_irqsave(&desc->lock, flags);
-		pending_irq_balance_cpumask[irq] = 1 << new_cpu;
+		pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
 		spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -399,8 +398,7 @@ static void do_irq_balance(void)
 	int tmp_loaded, first_attempt = 1;
 	unsigned long tmp_cpu_irq;
 	unsigned long imbalance = 0;
-	unsigned long allowed_mask;
-	unsigned long target_cpu_mask;
+	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		int package_index;
@@ -549,10 +547,11 @@ tryanotherirq:
 					CPU_IRQ(cpu_sibling_map[min_loaded]))
 		min_loaded = cpu_sibling_map[min_loaded];
 
-	allowed_mask = cpu_online_map & irq_affinity[selected_irq];
-	target_cpu_mask = 1 << min_loaded;
+	cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
+	target_cpu_mask = cpumask_of_cpu(min_loaded);
+	cpus_and(tmp, target_cpu_mask, allowed_mask);
 
-	if (target_cpu_mask & allowed_mask) {
+	if (!cpus_empty(tmp)) {
 		irq_desc_t *desc = irq_desc + selected_irq;
 		unsigned long flags;
 
@@ -560,7 +559,8 @@ tryanotherirq:
 				selected_irq, min_loaded);
 		/* mark for change destination */
 		spin_lock_irqsave(&desc->lock, flags);
-		pending_irq_balance_cpumask[selected_irq] = 1 << min_loaded;
+		pending_irq_balance_cpumask[selected_irq] =
+					cpumask_of_cpu(min_loaded);
 		spin_unlock_irqrestore(&desc->lock, flags);
 		/* Since we made a change, come back sooner to 
 		 * check for more variation.
@@ -591,8 +591,9 @@ int balanced_irq(void *unused)
 	daemonize("kirqd");
 	
 	/* push everything to CPU 0 to give us a starting point.  */
-	for (i = 0 ; i < NR_IRQS ; i++)
-		pending_irq_balance_cpumask[i] = 1;
+	for (i = 0 ; i < NR_IRQS ; i++) {
+		pending_irq_balance_cpumask[i] = cpumask_of_cpu(0);
+	}
 
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -611,7 +612,9 @@ static int __init balanced_irq_init(void
 {
 	int i;
 	struct cpuinfo_x86 *c;
+	cpumask_t tmp;
 
+	cpus_shift_right(tmp, cpu_online_map, 2);
         c = &boot_cpu_data;
 	/* When not overwritten by the command line ask subarchitecture. */
 	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
@@ -628,7 +631,7 @@ static int __init balanced_irq_init(void
 	 * Enable physical balance only if more than 1 physical processor
 	 * is present
 	 */
-	if (smp_num_siblings > 1 && cpu_online_map >> 2)
+	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
 	for (i = 0; i < NR_CPUS; i++) {
@@ -667,14 +670,14 @@ static int __init irqbalance_disable(cha
 
 __setup("noirqbalance", irqbalance_disable);
 
-static void set_ioapic_affinity (unsigned int irq, unsigned long mask);
+static void set_ioapic_affinity(unsigned int irq, cpumask_t mask);
 
 static inline void move_irq(int irq)
 {
 	/* note - we hold the desc->lock */
-	if (unlikely(pending_irq_balance_cpumask[irq])) {
+	if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) {
 		set_ioapic_affinity(irq, pending_irq_balance_cpumask[irq]);
-		pending_irq_balance_cpumask[irq] = 0;
+		cpus_clear(pending_irq_balance_cpumask[irq]);
 	}
 }
 
@@ -822,7 +825,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, 
  * we need to reprogram the ioredtbls to cater for the cpus which have come online
  * so mask in all cases should simply be TARGET_CPUS
  */
-void __init setup_ioapic_dest (unsigned long mask)
+void __init setup_ioapic_dest(cpumask_t mask)
 {
 	int pin, ioapic, irq, irq_entry;
 
@@ -1598,7 +1601,7 @@ void disable_IO_APIC(void)
 static void __init setup_ioapic_ids_from_mpc(void)
 {
 	union IO_APIC_reg_00 reg_00;
-	unsigned long phys_id_present_map;
+	cpumask_t phys_id_present_map;
 	int apic;
 	int i;
 	unsigned char old_id;
@@ -1608,7 +1611,12 @@ static void __init setup_ioapic_ids_from
 		/* This gets done during IOAPIC enumeration for ACPI. */
 		return;
 
-	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
+	/*
+	 * This is broken; anything with a real cpu count has to
+	 * circumvent this idiocy regardless.
+	 */
+	phys_id_present_map =
+		ioapic_phys_id_map(mk_cpumask_const(phys_cpu_present_map));
 
 	/*
 	 * Set the IOAPIC ID to the value stored in the MPC table.
@@ -1640,17 +1648,19 @@ static void __init setup_ioapic_ids_from
 			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
 				apic, mp_ioapics[apic].mpc_apicid);
 			for (i = 0; i < 0xf; i++)
-				if (!(phys_id_present_map & (1 << i)))
+				if (!cpu_isset(i, phys_id_present_map))
 					break;
 			if (i >= 0xf)
 				panic("Max APIC ID exceeded!\n");
 			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
 				i);
-			phys_id_present_map |= 1 << i;
+			cpu_set(i, phys_id_present_map);
 			mp_ioapics[apic].mpc_apicid = i;
 		} else {
+			cpumask_t tmp;
+			tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
 			printk("Setting %d in the phys_id_present_map\n", mp_ioapics[apic].mpc_apicid);
-			phys_id_present_map |= apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
+			cpus_or(phys_id_present_map, phys_id_present_map, tmp);
 		}
 
 
@@ -2220,7 +2230,8 @@ late_initcall(io_apic_bug_finalize);
 int __init io_apic_get_unique_id (int ioapic, int apic_id)
 {
 	union IO_APIC_reg_00 reg_00;
-	static unsigned long apic_id_map = 0;
+	static cpumask_t apic_id_map = CPU_MASK_NONE;
+	cpumask_t tmp;
 	unsigned long flags;
 	int i = 0;
 
@@ -2233,8 +2244,8 @@ int __init io_apic_get_unique_id (int io
 	 *      advantage of new APIC bus architecture.
 	 */
 
-	if (!apic_id_map)
-		apic_id_map = phys_cpu_present_map;
+	if (cpus_empty(apic_id_map))
+		apic_id_map = ioapic_phys_id_map(mk_cpumask_const(phys_cpu_present_map));
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(ioapic, 0);
@@ -2266,7 +2277,8 @@ int __init io_apic_get_unique_id (int io
 		apic_id = i;
 	} 
 
-	apic_id_map |= apicid_to_cpu_present(apic_id);
+	tmp = apicid_to_cpu_present(apic_id);
+	cpus_or(apic_id_map, apic_id_map, tmp);
 
 	if (reg_00.bits.ID != apic_id) {
 		reg_00.bits.ID = apic_id;
diff -puN arch/i386/kernel/irq.c~cpumask_t-1 arch/i386/kernel/irq.c
--- 25/arch/i386/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -45,8 +45,6 @@
 #include <asm/desc.h>
 #include <asm/irq.h>
 
-
-
 /*
  * Linux has a controller-independent x86 interrupt architecture.
  * every controller has a 'controller-template', that is used
@@ -890,13 +888,13 @@ int setup_irq(unsigned int irq, struct i
 static struct proc_dir_entry * root_irq_dir;
 static struct proc_dir_entry * irq_dir [NR_IRQS];
 
-#define HEX_DIGITS 8
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
-static unsigned int parse_hex_value (const char __user *buffer,
-		unsigned long count, unsigned long *ret)
+static unsigned int parse_hex_value(const char __user *buffer,
+		unsigned long count, cpumask_t *ret)
 {
-	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	unsigned char hexnum[HEX_DIGITS];
+	cpumask_t value = CPU_MASK_NONE;
 	int i;
 
 	if (!count)
@@ -910,10 +908,10 @@ static unsigned int parse_hex_value (con
 	 * Parse the first 8 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
 
 	for (i = 0; i < count; i++) {
 		unsigned int c = hexnum[i];
+		int k;
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -922,7 +920,10 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (test_bit(k, (unsigned long *)&c))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -931,22 +932,35 @@ out:
 
 #ifdef CONFIG_SMP
 
-static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
+
+cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 
-unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
-static int irq_affinity_read_proc (char *page, char **start, off_t off,
+static int irq_affinity_read_proc(char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	int k, len;
+	cpumask_t tmp = irq_affinity[(long)data];
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
+
+	len = 0;
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
-static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
+static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 					unsigned long count, void *data)
 {
-	int irq = (long) data, full_count = count, err;
-	unsigned long new_value;
+	int irq = (long)data, full_count = count, err;
+	cpumask_t new_value, tmp;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -958,11 +972,13 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	irq_affinity[irq] = new_value;
-	irq_desc[irq].handler->set_affinity(irq, new_value);
+	irq_desc[irq].handler->set_affinity(irq,
+					cpumask_of_cpu(first_cpu(new_value)));
 
 	return full_count;
 }
@@ -981,8 +997,9 @@ static int prof_cpu_mask_read_proc (char
 static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data;
+	unsigned long full_count = count, err;
+	cpumask_t new_value;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
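
Worth noting in the /proc handlers above: parse_hex_value() can no longer
accumulate into a single unsigned long, so it shifts the whole mask left
one nibble per hex digit (cpus_shift_left) and drops each digit into the
low four bits; the read side emits 16 bits at a time for the same reason.
A standalone model of the write path, with the mask hard-coded to two
words (helper names here are mine, not the kernel's):

#include <stdio.h>
#include <string.h>

#define WORDS	2
#define BPL	(8 * sizeof(unsigned long))

/* shift the whole two-word mask left one nibble, high word first,
 * carrying the top four bits of the low word -- what cpus_shift_left()
 * does for an arbitrary number of words */
static void mask_shl4(unsigned long m[WORDS])
{
	int k;

	for (k = WORDS - 1; k > 0; k--)
		m[k] = (m[k] << 4) | (m[k - 1] >> (BPL - 4));
	m[0] <<= 4;
}

static void parse_hex_mask(const char *s, unsigned long m[WORDS])
{
	memset(m, 0, WORDS * sizeof(m[0]));
	for (; *s; s++) {
		unsigned int c = *s;

		if (c >= '0' && c <= '9')
			c -= '0';
		else if (c >= 'a' && c <= 'f')
			c -= 'a' - 10;
		else if (c >= 'A' && c <= 'F')
			c -= 'A' - 10;
		else
			break;		/* any non-hex char ends the string */
		mask_shl4(m);
		m[0] |= c;		/* new nibble lands in the low word */
	}
}

int main(void)
{
	unsigned long m[WORDS];

	parse_hex_mask("deadbeef", m);
	printf("mask: %lx %lx\n", m[1], m[0]);
	return 0;
}
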
diff -puN arch/i386/kernel/kgdb_stub.c~cpumask_t-1 arch/i386/kernel/kgdb_stub.c
--- 25/arch/i386/kernel/kgdb_stub.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/kgdb_stub.c	2003-07-03 01:19:48.000000000 -0700
@@ -195,7 +195,7 @@ void smp_send_nmi_allbutself(void);
 #define hold_init hold_on_sstep: 1,
 #define MAX_CPU_MASK (unsigned long)((1LL << MAX_NO_CPUS) - 1LL)
 #define NUM_CPUS num_online_cpus()
-extern volatile unsigned long cpu_callout_map;
+extern cpumask_t cpu_callout_map;
 #else
 #define IF_SMP(x)
 #define hold_init
@@ -355,7 +355,7 @@ static int spinlock_cpu = 0;
 				      spinlock_count++; \
 			  }
 #define KGDB_SPIN_UNLOCK(x) if( --spinlock_count == 0) spin_unlock(x)
-extern volatile unsigned long cpu_callout_map;
+extern cpumask_t cpu_callout_map;
 #else
 unsigned kgdb_spinlock = 0;
 #define KGDB_SPIN_LOCK(x) --*x
@@ -1160,11 +1160,11 @@ kgdb_handle_exception(int exceptionVecto
 	 */
 #ifdef CONFIG_SMP
 
-	if (cpu_callout_map & ~MAX_CPU_MASK) {
+	if (cpus_weight(cpu_callout_map) > MAX_NO_CPUS) {
 		printk("kgdb : too many cpus, possibly not mapped"
 		       " in contiguous space, change MAX_NO_CPUS"
 		       " in kgdb_stub and make new kernel.\n"
-		       " cpu_callout_map is %lx\n", cpu_callout_map);
+		       " cpu_callout_map is %lx\n", cpus_coerce(cpu_callout_map));
 		goto exit_just_unlock;
 	}
 
@@ -1826,7 +1826,7 @@ kgdb_handle_exception(int exceptionVecto
 		/*
 		 * Early in the bring up there will be NO cpus on line...
 		 */
-		if (!cpu_avail && cpu_online_map) {
+		if (!cpu_avail && !cpus_empty(cpu_online_map)) {
 			to_gdb("No cpus unblocked, see 'kgdb_info.hold_cpu'\n");
 			goto once_again;
 		}
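
One semantic wrinkle in the kgdb hunk above: the old test fired whenever
any callout CPU was numbered at or above MAX_NO_CPUS, while cpus_weight()
only counts set bits, so a sparse map now slips through until more than
MAX_NO_CPUS CPUs exist.  That is consistent with the warning's "possibly
not mapped in contiguous space" wording, but not identical.  A standalone
illustration (the MAX_NO_CPUS value here is arbitrary):

#include <stdio.h>

#define MAX_NO_CPUS	8
#define MAX_CPU_MASK	((1UL << MAX_NO_CPUS) - 1)

/* cpus_weight(): population count over the mask */
static int weight(unsigned long m)
{
	int n = 0;

	for (; m; m &= m - 1)
		n++;
	return n;
}

int main(void)
{
	/* two CPUs, but one sits above the MAX_NO_CPUS window */
	unsigned long callout = (1UL << 0) | (1UL << 20);

	printf("old test fires: %d\n", (callout & ~MAX_CPU_MASK) != 0);
	printf("new test fires: %d\n", weight(callout) > MAX_NO_CPUS);
	return 0;
}
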
diff -puN arch/i386/kernel/ldt.c~cpumask_t-1 arch/i386/kernel/ldt.c
--- 25/arch/i386/kernel/ldt.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/ldt.c	2003-07-03 01:19:48.000000000 -0700
@@ -56,9 +56,11 @@ static int alloc_ldt(mm_context_t *pc, i
 
 	if (reload) {
 #ifdef CONFIG_SMP
+		cpumask_t mask;
 		preempt_disable();
 		load_LDT(pc);
-		if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
+		mask = cpumask_of_cpu(smp_processor_id());
+		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
 			smp_call_function(flush_ldt, 0, 1, 1);
 		preempt_enable();
 #else
diff -puN arch/i386/kernel/mpparse.c~cpumask_t-1 arch/i386/kernel/mpparse.c
--- 25/arch/i386/kernel/mpparse.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/mpparse.c	2003-07-03 01:19:48.000000000 -0700
@@ -71,7 +71,7 @@ unsigned int boot_cpu_logical_apicid = -
 static unsigned int __initdata num_processors;
 
 /* Bitmask of physically existing CPUs */
-unsigned long phys_cpu_present_map;
+cpumask_t phys_cpu_present_map;
 
 u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
 
@@ -106,6 +106,7 @@ static struct mpc_config_translation *tr
 void __init MP_processor_info (struct mpc_config_processor *m)
 {
  	int ver, apicid;
+	cpumask_t tmp;
  	
 	if (!(m->mpc_cpuflag & CPU_ENABLED))
 		return;
@@ -176,7 +177,8 @@ void __init MP_processor_info (struct mp
 	}
 	ver = m->mpc_apicver;
 
-	phys_cpu_present_map |= apicid_to_cpu_present(apicid);
+	tmp = apicid_to_cpu_present(apicid);
+	cpus_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
 	
 	/*
 	 * Validate version
diff -puN arch/i386/kernel/msr.c~cpumask_t-1 arch/i386/kernel/msr.c
--- 25/arch/i386/kernel/msr.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/msr.c	2003-07-03 01:19:48.000000000 -0700
@@ -242,7 +242,7 @@ static int msr_open(struct inode *inode,
   int cpu = minor(file->f_dentry->d_inode->i_rdev);
   struct cpuinfo_x86 *c = &(cpu_data)[cpu];
   
-  if ( !(cpu_online_map & (1UL << cpu)) )
+  if (!cpu_online(cpu))
     return -ENXIO;		/* No such CPU */
   if ( !cpu_has(c, X86_FEATURE_MSR) )
     return -EIO;		/* MSR not supported */
diff -puN arch/i386/kernel/reboot.c~cpumask_t-1 arch/i386/kernel/reboot.c
--- 25/arch/i386/kernel/reboot.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/reboot.c	2003-07-03 01:19:48.000000000 -0700
@@ -226,7 +226,7 @@ void machine_restart(char * __unused)
 		   if its not, default to the BSP */
 		if ((reboot_cpu == -1) ||  
 		      (reboot_cpu > (NR_CPUS -1))  || 
-		      !(phys_cpu_present_map & (1<<cpuid))) 
+		      !cpu_isset(cpuid, phys_cpu_present_map))
 			reboot_cpu = boot_cpu_physical_apicid;
 
 		reboot_smp = 0;  /* use this as a flag to only go through this once*/
diff -puN arch/i386/kernel/smpboot.c~cpumask_t-1 arch/i386/kernel/smpboot.c
--- 25/arch/i386/kernel/smpboot.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/smpboot.c	2003-07-03 01:19:48.000000000 -0700
@@ -61,12 +61,12 @@ static int __initdata smp_b_stepping;
 int smp_num_siblings = 1;
 int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
 
-/* Bitmask of currently online CPUs */
-unsigned long cpu_online_map;
+/* bitmap of online cpus */
+cpumask_t cpu_online_map;
 
-static volatile unsigned long cpu_callin_map;
-volatile unsigned long cpu_callout_map;
-static unsigned long smp_commenced_mask;
+static volatile cpumask_t cpu_callin_map;
+volatile cpumask_t cpu_callout_map;
+static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
@@ -268,7 +268,7 @@ static void __init synchronize_tsc_bp (v
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (test_bit(i, &cpu_callout_map)) {
+		if (cpu_isset(i, cpu_callout_map)) {
 			t0 = tsc_values[i];
 			sum += t0;
 		}
@@ -277,7 +277,7 @@ static void __init synchronize_tsc_bp (v
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (!test_bit(i, &cpu_callout_map))
+		if (!cpu_isset(i, cpu_callout_map))
 			continue;
 		delta = tsc_values[i] - avg;
 		if (delta < 0)
@@ -353,7 +353,7 @@ void __init smp_callin(void)
 	 */
 	phys_id = GET_APIC_ID(apic_read(APIC_ID));
 	cpuid = smp_processor_id();
-	if (test_bit(cpuid, &cpu_callin_map)) {
+	if (cpu_isset(cpuid, cpu_callin_map)) {
 		printk("huh, phys CPU#%d, CPU#%d already present??\n",
 					phys_id, cpuid);
 		BUG();
@@ -376,7 +376,7 @@ void __init smp_callin(void)
 		/*
 		 * Has the boot CPU finished it's STARTUP sequence?
 		 */
-		if (test_bit(cpuid, &cpu_callout_map))
+		if (cpu_isset(cpuid, cpu_callout_map))
 			break;
 		rep_nop();
 	}
@@ -417,7 +417,7 @@ void __init smp_callin(void)
 	/*
 	 * Allow the master to continue.
 	 */
-	set_bit(cpuid, &cpu_callin_map);
+	cpu_set(cpuid, cpu_callin_map);
 
 	/*
 	 *      Synchronize the TSC with the BP
@@ -442,7 +442,7 @@ int __init start_secondary(void *unused)
 	 */
 	cpu_init();
 	smp_callin();
-	while (!test_bit(smp_processor_id(), &smp_commenced_mask))
+	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
 		rep_nop();
 	setup_secondary_APIC_clock();
 	if (nmi_watchdog == NMI_IO_APIC) {
@@ -456,7 +456,7 @@ int __init start_secondary(void *unused)
 	 * the local TLBs too.
 	 */
 	local_flush_tlb();
-	set_bit(smp_processor_id(), &cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
 	wmb();
 	return cpu_idle();
 }
@@ -499,16 +499,16 @@ static struct task_struct * __init fork_
 #ifdef CONFIG_NUMA
 
 /* which logical CPUs are on which nodes */
-volatile unsigned long node_2_cpu_mask[MAX_NR_NODES] = 
-						{ [0 ... MAX_NR_NODES-1] = 0 };
+cpumask_t node_2_cpu_mask[MAX_NR_NODES] =
+				{ [0 ... MAX_NR_NODES-1] = CPU_MASK_NONE };
 /* which node each logical CPU is on */
-volatile int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
+int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
 
 /* set up a mapping between cpu and node. */
 static inline void map_cpu_to_node(int cpu, int node)
 {
 	printk("Mapping cpu %d to node %d\n", cpu, node);
-	node_2_cpu_mask[node] |= (1 << cpu);
+	cpu_set(cpu, node_2_cpu_mask[node]);
 	cpu_2_node[cpu] = node;
 }
 
@@ -519,7 +519,7 @@ static inline void unmap_cpu_to_node(int
 
 	printk("Unmapping cpu %d from all nodes\n", cpu);
 	for (node = 0; node < MAX_NR_NODES; node ++)
-		node_2_cpu_mask[node] &= ~(1 << cpu);
+		cpu_clear(cpu, node_2_cpu_mask[node]);
 	cpu_2_node[cpu] = -1;
 }
 #else /* !CONFIG_NUMA */
@@ -770,7 +770,7 @@ wakeup_secondary_cpu(int phys_apicid, un
 }
 #endif	/* WAKE_SECONDARY_VIA_INIT */
 
-extern unsigned long cpu_initialized;
+extern cpumask_t cpu_initialized;
 
 static int __init do_boot_cpu(int apicid)
 /*
@@ -836,19 +836,19 @@ static int __init do_boot_cpu(int apicid
 		 * allow APs to start initializing.
 		 */
 		Dprintk("Before Callout %d.\n", cpu);
-		set_bit(cpu, &cpu_callout_map);
+		cpu_set(cpu, cpu_callout_map);
 		Dprintk("After Callout %d.\n", cpu);
 
 		/*
 		 * Wait 5s total for a response
 		 */
 		for (timeout = 0; timeout < 50000; timeout++) {
-			if (test_bit(cpu, &cpu_callin_map))
+			if (cpu_isset(cpu, cpu_callin_map))
 				break;	/* It has booted */
 			udelay(100);
 		}
 
-		if (test_bit(cpu, &cpu_callin_map)) {
+		if (cpu_isset(cpu, cpu_callin_map)) {
 			/* number CPUs logically, starting from 1 (BSP is 0) */
 			Dprintk("OK.\n");
 			printk("CPU%d: ", cpu);
@@ -869,8 +869,8 @@ static int __init do_boot_cpu(int apicid
 	if (boot_error) {
 		/* Try to put things back the way they were before ... */
 		unmap_cpu_to_logical_apicid(cpu);
-		clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
-		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
 		cpucount--;
 	}
 
@@ -957,7 +957,7 @@ static void __init smp_boot_cpus(unsigne
 	if (!smp_found_config) {
 		printk(KERN_NOTICE "SMP motherboard not detected.\n");
 		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = 1;
+		phys_cpu_present_map = cpumask_of_cpu(0);
 		if (APIC_init_uniprocessor())
 			printk(KERN_NOTICE "Local APIC not detected."
 					   " Using dummy APIC emulation.\n");
@@ -973,7 +973,7 @@ static void __init smp_boot_cpus(unsigne
 	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
 		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
 				boot_cpu_physical_apicid);
-		phys_cpu_present_map |= (1 << hard_smp_processor_id());
+		cpu_set(hard_smp_processor_id(), phys_cpu_present_map);
 	}
 
 	/*
@@ -984,7 +984,7 @@ static void __init smp_boot_cpus(unsigne
 			boot_cpu_physical_apicid);
 		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
 		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = 1;
+		phys_cpu_present_map = cpumask_of_cpu(0);
 		return;
 	}
 
@@ -997,7 +997,7 @@ static void __init smp_boot_cpus(unsigne
 		smp_found_config = 0;
 		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
 		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = 1;
+		phys_cpu_present_map = cpumask_of_cpu(0);
 		return;
 	}
 
@@ -1017,10 +1017,10 @@ static void __init smp_boot_cpus(unsigne
 	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
 	 * clustered apic ID.
 	 */
-	Dprintk("CPU present map: %lx\n", phys_cpu_present_map);
+	Dprintk("CPU present map: %lx\n", cpus_coerce(phys_cpu_present_map));
 
 	kicked = 1;
-	for (bit = 0; kicked < NR_CPUS && bit < BITS_PER_LONG; bit++) {
+	for (bit = 0; kicked < NR_CPUS && bit < 8*sizeof(cpumask_t); bit++) {
 		apicid = cpu_present_to_apicid(bit);
 		/*
 		 * Don't even attempt to start the boot CPU!
@@ -1055,7 +1055,7 @@ static void __init smp_boot_cpus(unsigne
 	} else {
 		unsigned long bogosum = 0;
 		for (cpu = 0; cpu < NR_CPUS; cpu++)
-			if (cpu_callout_map & (1<<cpu))
+			if (cpu_isset(cpu, cpu_callout_map))
 				bogosum += cpu_data[cpu].loops_per_jiffy;
 		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 			cpucount+1,
@@ -1087,10 +1087,11 @@ static void __init smp_boot_cpus(unsigne
 		
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			int 	i;
-			if (!test_bit(cpu, &cpu_callout_map)) continue;
+			if (!cpu_isset(cpu, cpu_callout_map))
+				continue;
 
 			for (i = 0; i < NR_CPUS; i++) {
-				if (i == cpu || !test_bit(i, &cpu_callout_map))
+				if (i == cpu || !cpu_isset(i, cpu_callout_map))
 					continue;
 				if (phys_proc_id[cpu] == phys_proc_id[i]) {
 					cpu_sibling_map[cpu] = i;
@@ -1125,28 +1126,28 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_callout_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_callout_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86.  See "rewrite" above. */
-	if (test_bit(cpu, &smp_commenced_mask)) {
+	if (cpu_isset(cpu, smp_commenced_mask)) {
 		local_irq_enable();
 		return -ENOSYS;
 	}
 
 	/* In case one didn't come up */
-	if (!test_bit(cpu, &cpu_callin_map)) {
+	if (!cpu_isset(cpu, cpu_callin_map)) {
 		local_irq_enable();
 		return -EIO;
 	}
 
 	local_irq_enable();
 	/* Unleash the CPU! */
-	set_bit(cpu, &smp_commenced_mask);
-	while (!test_bit(cpu, &cpu_online_map))
+	cpu_set(cpu, smp_commenced_mask);
+	while (!cpu_isset(cpu, cpu_online_map))
 		mb();
 	return 0;
 }
@@ -1154,7 +1155,8 @@ int __devinit __cpu_up(unsigned int cpu)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 #ifdef CONFIG_X86_IO_APIC
-	setup_ioapic_dest(TARGET_CPUS);
+	cpumask_t targets = CPU_MASK_ALL;
+	setup_ioapic_dest(targets);
 #endif
 	zap_low_mappings();
 }
diff -puN arch/i386/kernel/smp.c~cpumask_t-1 arch/i386/kernel/smp.c
--- 25/arch/i386/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -155,8 +155,12 @@ void send_IPI_self(int vector)
 	__send_IPI_shortcut(APIC_DEST_SELF, vector);
 }
 
-inline void send_IPI_mask_bitmask(int mask, int vector)
+/*
+ * This is only used on smaller machines.
+ */
+inline void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
 {
+	unsigned long mask = cpus_coerce(cpumask);
 	unsigned long cfg;
 	unsigned long flags;
 
@@ -186,10 +190,10 @@ inline void send_IPI_mask_bitmask(int ma
 	local_irq_restore(flags);
 }
 
-inline void send_IPI_mask_sequence(int mask, int vector)
+inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 {
 	unsigned long cfg, flags;
-	unsigned int query_cpu, query_mask;
+	unsigned int query_cpu;
 
 	/*
 	 * Hack. The clustered APIC addressing mode doesn't allow us to send 
@@ -200,8 +204,7 @@ inline void send_IPI_mask_sequence(int m
 	local_irq_save(flags);
 
 	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
-		query_mask = 1 << query_cpu;
-		if (query_mask & mask) {
+		if (cpu_isset(query_cpu, mask)) {
 		
 			/*
 			 * Wait for idle.
@@ -238,7 +241,7 @@ inline void send_IPI_mask_sequence(int m
  *	Optimizations Manfred Spraul <manfred@colorfullife.com>
  */
 
-static volatile unsigned long flush_cpumask;
+static volatile cpumask_t flush_cpumask;
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
 static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
@@ -255,7 +258,7 @@ static inline void leave_mm (unsigned lo
 {
 	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
 		BUG();
-	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	cpu_clear(cpu, cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 
@@ -265,7 +268,7 @@ static inline void leave_mm (unsigned lo
  * [cpu0: the cpu that switches]
  * 1) switch_mm() either 1a) or 1b)
  * 1a) thread switch to a different mm
- * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
  * 	Stop ipi delivery for the old mm. This is not synchronized with
  * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
  * 	for the wrong mm, and in the worst case we perform a superflous
@@ -275,7 +278,7 @@ static inline void leave_mm (unsigned lo
  *	was in lazy tlb mode.
  * 1a3) update cpu_tlbstate[].active_mm
  * 	Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
  * 	Now the other cpus will send tlb flush ipis.
  * 1a4) change cr3.
  * 1b) thread switch without mm change
@@ -311,7 +314,7 @@ asmlinkage void smp_invalidate_interrupt
 
 	cpu = get_cpu();
 
-	if (!test_bit(cpu, &flush_cpumask))
+	if (!cpu_isset(cpu, flush_cpumask))
 		goto out;
 		/* 
 		 * This was a BUG() but until someone can quote me the
@@ -332,15 +335,17 @@ asmlinkage void smp_invalidate_interrupt
 			leave_mm(cpu);
 	}
 	ack_APIC_irq();
-	clear_bit(cpu, &flush_cpumask);
-
+	smp_mb__before_clear_bit();
+	cpu_clear(cpu, flush_cpumask);
+	smp_mb__after_clear_bit();
 out:
 	put_cpu_no_resched();
 }
 
-static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 						unsigned long va)
 {
+	cpumask_t tmp;
 	/*
 	 * A couple of (to be removed) sanity checks:
 	 *
@@ -348,14 +353,12 @@ static void flush_tlb_others (unsigned l
 	 * - current CPU must not be in mask
 	 * - mask must exist :)
 	 */
-	if (!cpumask)
-		BUG();
-	if ((cpumask & cpu_online_map) != cpumask)
-		BUG();
-	if (cpumask & (1 << smp_processor_id()))
-		BUG();
-	if (!mm)
-		BUG();
+	BUG_ON(cpus_empty(cpumask));
+
+	cpus_and(tmp, cpumask, cpu_online_map);
+	BUG_ON(!cpus_equal(cpumask, tmp));
+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(!mm);
 
 	/*
 	 * i'm not happy about this global shared spinlock in the
@@ -367,15 +370,26 @@ static void flush_tlb_others (unsigned l
 	
 	flush_mm = mm;
 	flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
 	atomic_set_mask(cpumask, &flush_cpumask);
+#else
+	{
+		int k;
+		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
+		unsigned long *cpu_mask = (unsigned long *)&cpumask;
+		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
+			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
+	}
+#endif
 	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
 	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
 
-	while (flush_cpumask)
-		/* nothing. lockup detection does not belong here */;
+	while (!cpus_empty(flush_cpumask))
+		/* nothing. lockup detection does not belong here */
+		mb();
 
 	flush_mm = NULL;
 	flush_va = 0;
@@ -385,23 +399,25 @@ static void flush_tlb_others (unsigned l
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long cpu_mask;
+	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
-	if (cpu_mask)
+	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_mm (struct mm_struct * mm)
 {
-	unsigned long cpu_mask;
+	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -409,7 +425,7 @@ void flush_tlb_mm (struct mm_struct * mm
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (cpu_mask)
+	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -418,10 +434,11 @@ void flush_tlb_mm (struct mm_struct * mm
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long cpu_mask;
+	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if(current->mm)
@@ -430,7 +447,7 @@ void flush_tlb_page(struct vm_area_struc
 		 	leave_mm(smp_processor_id());
 	}
 
-	if (cpu_mask)
+	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
@@ -457,7 +474,7 @@ void flush_tlb_all(void)
  */
 void smp_send_reschedule(int cpu)
 {
-	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
+	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 #ifdef CONFIG_KGDB
 /*
@@ -543,7 +560,7 @@ static void stop_this_cpu (void * dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	disable_local_APIC();
 	if (cpu_data[smp_processor_id()].hlt_works_ok)
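
The flush_tlb_others() hunk above is one place where the conversion is
not purely mechanical: once the mask is wider than a long there is no
single atomic operation covering it, so each word is or-ed into
flush_cpumask separately.  That is safe here because the initiator merely
spins until every bit it set has been cleared again and each responder
clears only its own bit, so no ordering between words is required.  A
userspace model of that protocol using the GCC __atomic builtins:

#include <stdio.h>

#define NR_CPUS		128
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long flush_mask[BITS_TO_LONGS(NR_CPUS)];

/* initiator: or in the target mask one word at a time */
static void set_flush_mask(const unsigned long *cpus)
{
	size_t k;

	for (k = 0; k < BITS_TO_LONGS(NR_CPUS); k++)
		__atomic_fetch_or(&flush_mask[k], cpus[k], __ATOMIC_SEQ_CST);
}

/* responder: clear only its own bit once its TLB flush is done */
static void ack_flush(size_t cpu)
{
	__atomic_fetch_and(&flush_mask[cpu / BITS_PER_LONG],
			   ~(1UL << (cpu % BITS_PER_LONG)), __ATOMIC_SEQ_CST);
}

static int mask_empty(void)
{
	size_t k;

	for (k = 0; k < BITS_TO_LONGS(NR_CPUS); k++)
		if (__atomic_load_n(&flush_mask[k], __ATOMIC_SEQ_CST))
			return 0;
	return 1;
}

int main(void)
{
	unsigned long cpus[BITS_TO_LONGS(NR_CPUS)] = { 0 };

	cpus[1] = 1UL << 2;		/* target CPU BITS_PER_LONG + 2 */
	set_flush_mask(cpus);
	ack_flush(BITS_PER_LONG + 2);	/* responder acknowledges */
	printf("mask empty: %d\n", mask_empty());
	return 0;
}
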
diff -puN arch/i386/mach-generic/bigsmp.c~cpumask_t-1 arch/i386/mach-generic/bigsmp.c
--- 25/arch/i386/mach-generic/bigsmp.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/mach-generic/bigsmp.c	2003-07-03 01:19:48.000000000 -0700
@@ -3,6 +3,9 @@
  * Drives the local APIC in "clustered mode".
  */
 #define APIC_DEFINITION 1
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/genapic.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
diff -puN arch/i386/mach-generic/default.c~cpumask_t-1 arch/i386/mach-generic/default.c
--- 25/arch/i386/mach-generic/default.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/mach-generic/default.c	2003-07-03 01:19:48.000000000 -0700
@@ -2,6 +2,9 @@
  * Default generic APIC driver. This handles upto 8 CPUs.
  */
 #define APIC_DEFINITION 1
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/mach-default/mach_apicdef.h>
 #include <asm/genapic.h>
 #include <asm/fixmap.h>
diff -puN arch/i386/mach-generic/probe.c~cpumask_t-1 arch/i386/mach-generic/probe.c
--- 25/arch/i386/mach-generic/probe.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/mach-generic/probe.c	2003-07-03 01:19:48.000000000 -0700
@@ -3,6 +3,9 @@
  * 
  * Generic x86 APIC driver probe layer.
  */  
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/ctype.h>
diff -puN arch/i386/mach-generic/summit.c~cpumask_t-1 arch/i386/mach-generic/summit.c
--- 25/arch/i386/mach-generic/summit.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/mach-generic/summit.c	2003-07-03 01:19:48.000000000 -0700
@@ -2,6 +2,9 @@
  * APIC driver for the IBM "Summit" chipset.
  */
 #define APIC_DEFINITION 1
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/genapic.h>
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
diff -puN arch/i386/mach-visws/mpparse.c~cpumask_t-1 arch/i386/mach-visws/mpparse.c
--- 25/arch/i386/mach-visws/mpparse.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/mach-visws/mpparse.c	2003-07-03 01:19:48.000000000 -0700
@@ -26,7 +26,7 @@ unsigned int boot_cpu_physical_apicid = 
 unsigned int boot_cpu_logical_apicid = -1U;
 
 /* Bitmask of physically existing CPUs */
-unsigned long phys_cpu_present_map;
+cpumask_t phys_cpu_present_map;
 
 
 /*
@@ -38,6 +38,7 @@ unsigned long phys_cpu_present_map;
 void __init MP_processor_info (struct mpc_config_processor *m)
 {
  	int ver, logical_apicid;
+	cpumask_t apic_cpus;
  	
 	if (!(m->mpc_cpuflag & CPU_ENABLED))
 		return;
@@ -62,7 +63,8 @@ void __init MP_processor_info (struct mp
 	}
 	ver = m->mpc_apicver;
 
-	phys_cpu_present_map |= apicid_to_cpu_present(m->mpc_apicid);
+	apic_cpus = apicid_to_cpu_present(m->mpc_apicid);
+	cpus_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
 	/*
 	 * Validate version
 	 */
diff -puN arch/i386/mach-voyager/voyager_basic.c~cpumask_t-1 arch/i386/mach-voyager/voyager_basic.c
diff -puN arch/i386/mach-voyager/voyager_smp.c~cpumask_t-1 arch/i386/mach-voyager/voyager_smp.c
--- 25/arch/i386/mach-voyager/voyager_smp.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/i386/mach-voyager/voyager_smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -75,15 +75,15 @@ static int voyager_extended_cpus = 1;
 int smp_found_config = 0;
 
 /* Used for the invalidate map that's also checked in the spinlock */
-volatile unsigned long smp_invalidate_needed;
+static volatile unsigned long smp_invalidate_needed;
 
 /* Bitmask of currently online CPUs - used by setup.c for
    /proc/cpuinfo, visible externally but still physical */
-unsigned long cpu_online_map = 0;
+cpumask_t cpu_online_map = CPU_MASK_NONE;
 
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
-unsigned long phys_cpu_present_map = 0;
+cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 
 /* estimate of time used to flush the SMP-local cache - used in
  * processor affinity calculations */
@@ -108,7 +108,7 @@ static void enable_local_vic_irq(unsigne
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, unsigned long mask);
+static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -128,13 +128,12 @@ send_one_QIC_CPI(__u8 cpu, __u8 cpi)
 static inline void
 send_QIC_CPI(__u32 cpuset, __u8 cpi)
 {
-	int mask;
-	__u8 cpu;
+	int cpu;
 
-	for_each_cpu(cpu, mask) {
+	for_each_cpu(cpu, mk_cpumask_const(cpu_online_map)) {
 		if(cpuset & (1<<cpu)) {
 #ifdef VOYAGER_DEBUG
-			if(!test_bit(cpu, cpu_online_map))
+			if(!cpu_isset(cpu, cpu_online_map))
 				VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu));
 #endif
 			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
@@ -155,7 +154,7 @@ static inline void
 send_CPI_allbutself(__u8 cpi)
 {
 	__u8 cpu = smp_processor_id();
-	__u32 mask = (cpu_online_map & (~(1<<cpu)));
+	__u32 mask = cpus_coerce(cpu_online_map) & ~(1 << cpu);
 	send_CPI(mask, cpi);
 }
 
@@ -243,11 +242,11 @@ static __u32 cpu_booted_map;
 
 /* the synchronize flag used to hold all secondary CPUs spinning in
  * a tight loop until the boot sequence is ready for them */
-static unsigned long smp_commenced_mask = 0;
+static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-volatile unsigned long cpu_callin_map = 0;
-volatile unsigned long cpu_callout_map = 0;
+volatile cpumask_t cpu_callin_map = CPU_MASK_NONE;
+volatile cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -395,7 +394,7 @@ find_smp_config(void)
 	for(i=0; i<NR_CPUS; i++) {
 		cpu_irq_affinity[i] = ~0;
 	}
-	cpu_online_map = (1<<boot_cpu_id);
+	cpu_online_map = cpumask_of_cpu(boot_cpu_id);
 
 	/* The boot CPU must be extended */
 	voyager_extended_vic_processors = 1<<boot_cpu_id;
@@ -404,11 +403,11 @@ find_smp_config(void)
 	/* set up everything for just this CPU, we can alter
 	 * this as we start the other CPUs later */
 	/* now get the CPU disposition from the extended CMOS */
-	phys_cpu_present_map = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
-	phys_cpu_present_map |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-	phys_cpu_present_map |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
-	phys_cpu_present_map |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
-	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", phys_cpu_present_map);
+	phys_cpu_present_map = cpus_promote(voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK));
+	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
+	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
+	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
+	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_coerce(phys_cpu_present_map));
 	/* Here we set up the VIC to enable SMP */
 	/* enable the CPIs by writing the base vector to their register */
 	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
@@ -509,18 +508,18 @@ start_secondary(void *unused)
 	 * permission to proceed.  Without this, the new per CPU stuff
 	 * in the softirqs will fail */
 	local_irq_disable();
-	set_bit(cpuid, &cpu_callin_map);
+	cpu_set(cpuid, cpu_callin_map);
 
 	/* signal that we're done */
 	cpu_booted_map = 1;
 
-	while (!test_bit(cpuid, &smp_commenced_mask))
+	while (!cpu_isset(cpuid, smp_commenced_mask))
 		rep_nop();
 	local_irq_enable();
 
 	local_flush_tlb();
 
-	set_bit(cpuid, &cpu_online_map);
+	cpu_set(cpuid, cpu_online_map);
 	wmb();
 	return cpu_idle();
 }
@@ -674,14 +673,14 @@ do_boot_cpu(__u8 cpu)
 	free_page((unsigned long)page_table_copies);
 #endif
 	  
-	if(cpu_booted_map) {
+	if (cpu_booted_map) {
 		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
 			cpu, smp_processor_id()));
 	
 		printk("CPU%d: ", cpu);
 		print_cpu_info(&cpu_data[cpu]);
 		wmb();
-		set_bit(cpu, &cpu_callout_map);
+		cpu_set(cpu, cpu_callout_map);
 	}
 	else {
 		printk("CPU%d FAILED TO BOOT: ", cpu);
@@ -708,13 +707,12 @@ smp_boot_cpus(void)
 		/* now that the cat has probed the Voyager System Bus, sanity
 		 * check the cpu map */
 		if( ((voyager_quad_processors | voyager_extended_vic_processors)
-		     & phys_cpu_present_map) != phys_cpu_present_map) {
+		     & cpus_coerce(phys_cpu_present_map)) != cpus_coerce(phys_cpu_present_map)) {
 			/* should panic */
 			printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
 		}
-	} else if(voyager_level == 4) {
-		voyager_extended_vic_processors = phys_cpu_present_map;
-	}
+	} else if(voyager_level == 4)
+		voyager_extended_vic_processors = cpus_coerce(phys_cpu_present_map);
 
 	/* this sets up the idle task to run on the current cpu */
 	voyager_extended_cpus = 1;
@@ -740,13 +738,13 @@ smp_boot_cpus(void)
 	/* enable our own CPIs */
 	vic_enable_cpi();
 
-	set_bit(boot_cpu_id, &cpu_online_map);
-	set_bit(boot_cpu_id, &cpu_callout_map);
+	cpu_set(boot_cpu_id, cpu_online_map);
+	cpu_set(boot_cpu_id, cpu_callout_map);
 	
 	/* loop over all the extended VIC CPUs and boot them.  The 
 	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
 	for(i = 0; i < NR_CPUS; i++) {
-		if( i == boot_cpu_id || ((1<<i) & (phys_cpu_present_map) ) == 0)
+		if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
 			continue;
 		do_boot_cpu(i);
 		/* This udelay seems to be needed for the Quad boots
@@ -758,7 +756,7 @@ smp_boot_cpus(void)
 	{
 		unsigned long bogosum = 0;
 		for (i = 0; i < NR_CPUS; i++)
-			if (cpu_online_map & (1<<i))
+			if (cpu_isset(i, cpu_online_map))
 				bogosum += cpu_data[i].loops_per_jiffy;
 		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 			cpucount+1,
@@ -865,7 +863,7 @@ leave_mm (unsigned long cpu)
 {
 	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
 		BUG();
-	clear_bit(cpu,  &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	cpu_clear(cpu,  cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 
@@ -878,7 +876,7 @@ smp_invalidate_interrupt(void)
 {
 	__u8 cpu = get_cpu();
 
-	if(!test_bit(cpu, &smp_invalidate_needed))
+	if (!(smp_invalidate_needed & (1UL << cpu)))
 		goto out;
 	/* This will flood messages.  Don't uncomment unless you see
 	 * Problems with cross cpu invalidation
@@ -895,7 +893,7 @@ smp_invalidate_interrupt(void)
 		} else
 			leave_mm(cpu);
 	}
-	clear_bit(cpu, &smp_invalidate_needed);
+	smp_invalidate_needed &= ~(1UL << cpu);
  out:
 	put_cpu_no_resched();
 }
@@ -912,7 +910,7 @@ flush_tlb_others (unsigned long cpumask,
 
 	if (!cpumask)
 		BUG();
-	if ((cpumask & cpu_online_map) != cpumask)
+	if ((cpumask & cpus_coerce(cpu_online_map)) != cpumask)
 		BUG();
 	if (cpumask & (1 << smp_processor_id()))
 		BUG();
@@ -954,7 +952,7 @@ flush_tlb_current_task(void)
 
 	preempt_disable();
 
-	cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
@@ -970,7 +968,7 @@ flush_tlb_mm (struct mm_struct * mm)
 
 	preempt_disable();
 
-	cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -991,7 +989,7 @@ void flush_tlb_page(struct vm_area_struc
 
 	preempt_disable();
 
-	cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
 	if (current->active_mm == mm) {
 		if(current->mm)
 			__flush_tlb_one(va);
@@ -1033,7 +1031,7 @@ static void
 smp_stop_cpu_function(void *dummy)
 {
 	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	for(;;)
 	       __asm__("hlt");
@@ -1100,7 +1098,7 @@ smp_call_function (void (*func) (void *i
 		   int wait)
 {
 	struct call_data_struct data;
-	__u32 mask = cpu_online_map;
+	__u32 mask = cpus_coerce(cpu_online_map);
 
 	mask &= ~(1<<smp_processor_id());
 
@@ -1451,8 +1449,7 @@ smp_intr_init(void)
 static void
 send_CPI(__u32 cpuset, __u8 cpi)
 {
-	int mask;
-	__u8 cpu;
+	int cpu;
 	__u32 quad_cpuset = (cpuset & voyager_quad_processors);
 
 	if(cpi < VIC_START_FAKE_CPI) {
@@ -1467,7 +1464,7 @@ send_CPI(__u32 cpuset, __u8 cpi)
 	cpuset &= 0xff;		/* only first 8 CPUs vaild for VIC CPI */
 	if(cpuset == 0)
 		return;
-	for_each_cpu(cpu, mask) {
+	for_each_cpu(cpu, mk_cpumask_const(cpu_online_map)) {
 		if(cpuset & (1<<cpu))
 			set_bit(cpi, &vic_cpi_mailbox[cpu]);
 	}
@@ -1571,10 +1568,9 @@ startup_vic_irq(unsigned int irq)
 static void
 enable_vic_irq(unsigned int irq)
 {
-	int tmpmask;
 	/* linux doesn't do processor-irq affinity, so enable on
 	 * all CPUs we know about */
-	__u8 cpu = smp_processor_id(), real_cpu;
+	int cpu = smp_processor_id(), real_cpu;
 	__u16 mask = (1<<irq);
 	__u32 processorList = 0;
 	unsigned long flags;
@@ -1582,7 +1578,7 @@ enable_vic_irq(unsigned int irq)
 	VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
 		irq, cpu, cpu_irq_affinity[cpu]));
 	spin_lock_irqsave(&vic_irq_lock, flags);
-	for_each_cpu(real_cpu, tmpmask) {
+	for_each_cpu(real_cpu, mk_cpumask_const(cpu_online_map)) {
 		if(!(voyager_extended_vic_processors & (1<<real_cpu)))
 			continue;
 		if(!(cpu_irq_affinity[real_cpu] & mask)) {
@@ -1727,7 +1723,7 @@ after_handle_vic_irq(unsigned int irq)
 
 			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
 			       cpu, irq);
-			for_each_cpu(real_cpu, mask) {
+			for_each_cpu(real_cpu, mk_cpumask_const(mask)) {
 
 				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
 				     VIC_PROCESSOR_ID);
@@ -1783,15 +1779,16 @@ after_handle_vic_irq(unsigned int irq)
  * the selected processors */
 
 void
-set_vic_irq_affinity(unsigned int irq, unsigned long mask) 
+set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 {
 	/* Only extended processors handle interrupts */
-	unsigned long real_mask = mask & voyager_extended_vic_processors;
-	unsigned long irq_mask = (1<<irq);
-	int tmpmask;
-	__u8 cpu;
+	unsigned long real_mask;
+	unsigned long irq_mask = 1 << irq;
+	int cpu;
+
+	real_mask = cpus_coerce(mask) & voyager_extended_vic_processors;
 	
-	if(mask == 0)
+	if(cpus_coerce(mask) == 0)
 		/* can't have no cpu's to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
@@ -1811,8 +1808,8 @@ set_vic_irq_affinity(unsigned int irq, u
 		 * bus) */
 		return;
 
-	for_each_cpu(cpu, tmpmask) {
-		unsigned long cpu_mask = (1<<cpu);
+	for_each_cpu(cpu, mk_cpumask_const(cpu_online_map)) {
+		unsigned long cpu_mask = 1 << cpu;
 		
 		if(cpu_mask & real_mask) {
 			/* enable the interrupt for this cpu */
@@ -1874,11 +1871,10 @@ vic_enable_cpi(void)
 void
 voyager_smp_dump()
 {
-	int mask;
-	__u8 old_cpu = smp_processor_id(), cpu;
+	int old_cpu = smp_processor_id(), cpu;
 
 	/* dump the interrupt masks of each processor */
-	for_each_cpu(cpu, mask) {
+	for_each_cpu(cpu, mk_cpumask_const(cpu_online_map)) {
 		__u16 imr, isr, irr;
 		unsigned long flags;
 
@@ -1936,23 +1932,23 @@ smp_prepare_cpus(unsigned int max_cpus)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_callout_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_callout_map);
 }
 
 int __devinit
 __cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86.  See "rewrite" above. */
-	if (test_bit(cpu, &smp_commenced_mask))
+	if (cpu_isset(cpu, smp_commenced_mask))
 		return -ENOSYS;
 
 	/* In case one didn't come up */
-	if (!test_bit(cpu, &cpu_callin_map))
+	if (!cpu_isset(cpu, cpu_callin_map))
 		return -EIO;
 	/* Unleash the CPU! */
-	set_bit(cpu, &smp_commenced_mask);
-	while (!test_bit(cpu, &cpu_online_map))
+	cpu_set(cpu, smp_commenced_mask);
+	while (!cpu_isset(cpu, cpu_online_map))
 		mb();
 	return 0;
 }
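
Editor's note: the Voyager hunks above mix cpumask_t bookkeeping (cpu_set, cpu_isset, cpu_clear) with raw __u32 hardware masks, crossing between the two with cpus_coerce(). Below is a minimal, hypothetical userspace model of those accessors, assuming a single-word mask (NR_CPUS <= BITS_PER_LONG); it sketches the semantics only and is not the kernel implementation.

#include <assert.h>

typedef struct { unsigned long bits[1]; } cpumask_t;

#define cpu_set(cpu, dst)    ((dst).bits[0] |= 1UL << (cpu))
#define cpu_clear(cpu, dst)  ((dst).bits[0] &= ~(1UL << (cpu)))
#define cpu_isset(cpu, mask) (((mask).bits[0] >> (cpu)) & 1UL)
#define cpus_coerce(mask)    ((mask).bits[0])   /* lossy beyond one word */

int main(void)
{
	cpumask_t map = { { 0 } };

	cpu_set(0, map);
	cpu_set(2, map);
	assert(cpu_isset(2, map) && !cpu_isset(1, map));
	/* boundary with __u32 hardware registers, as in send_CPI() */
	assert((cpus_coerce(map) & 0xff) == 0x05);
	return 0;
}

cpus_coerce() is only lossless while the mask fits in one word, which is why the VIC paths that talk to 8-bit hardware registers mask with 0xff first.
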
diff -puN arch/ia64/kernel/iosapic.c~cpumask_t-1 arch/ia64/kernel/iosapic.c
--- 25/arch/ia64/kernel/iosapic.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/iosapic.c	2003-07-03 01:19:48.000000000 -0700
@@ -274,7 +274,7 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, unsigned long mask)
+iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 {
 #ifdef CONFIG_SMP
 	unsigned long flags;
@@ -287,12 +287,12 @@ iosapic_set_affinity (unsigned int irq, 
 	irq &= (~IA64_IRQ_REDIRECTED);
 	vec = irq_to_vector(irq);
 
-	mask &= cpu_online_map;
-
-	if (!mask || vec >= IA64_NUM_VECTORS)
+	cpus_and(mask, mask, cpu_online_map);
+
+	if (cpus_empty(mask) || vec >= IA64_NUM_VECTORS)
 		return;
 
-	dest = cpu_physical_id(ffz(~mask));
+	dest = cpu_physical_id(first_cpu(mask));
 
 	rte_index = iosapic_intr_info[vec].rte_index;
 	addr = iosapic_intr_info[vec].addr;
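
Editor's note: the iosapic hunk swaps ffz(~mask) for first_cpu(mask); both yield the index of the lowest set bit. A small hypothetical check of that equivalence on a single word (ffz_word is a stand-in for the kernel's ffz, and assumes its argument has at least one zero bit, which the non-empty-mask check above guarantees):

#include <assert.h>

static int ffz_word(unsigned long w)	/* index of lowest zero bit */
{
	int i = 0;

	while (w & 1) {
		w >>= 1;
		++i;
	}
	return i;
}

int main(void)
{
	unsigned long mask = 0x28;	/* CPUs {3, 5} */

	assert(ffz_word(~mask) == 3);	/* old idiom: ffz(~mask) */
	/* first_cpu() walks the mask words and returns 3 as well */
	return 0;
}
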
diff -puN arch/ia64/kernel/irq.c~cpumask_t-1 arch/ia64/kernel/irq.c
--- 25/arch/ia64/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -898,13 +898,14 @@ int setup_irq(unsigned int irq, struct i
 static struct proc_dir_entry * root_irq_dir;
 static struct proc_dir_entry * irq_dir [NR_IRQS];
 
-#define HEX_DIGITS 8
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
-static unsigned int parse_hex_value (const char *buffer,
-		unsigned long count, unsigned long *ret)
+static unsigned int parse_hex_value(const char *buffer,
+		unsigned long count, cpumask_t *ret)
 {
-	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value, i;
+	unsigned char hexnum[HEX_DIGITS];
+	cpumask_t value = CPU_MASK_NONE;
+	unsigned long i;
 
 	if (!count)
 		return -EINVAL;
@@ -917,10 +918,9 @@ static unsigned int parse_hex_value (con
 	 * Parse the first 8 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
-
 	for (i = 0; i < count; i++) {
 		unsigned int c = hexnum[i];
+		int k;
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -929,7 +929,10 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (c & (1 << k))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -940,12 +943,15 @@ out:
 
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
-	unsigned long mask = 1UL<<cpu_logical_id(hwid);
+	cpumask_t mask = CPU_MASK_NONE;
+
+	cpu_set(cpu_logical_id(hwid), mask);
 
 	if (irq < NR_IRQS) {
 		irq_affinity[irq] = mask;
@@ -956,10 +962,21 @@ void set_irq_affinity_info (unsigned int
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	if (count < HEX_DIGITS+3)
+	int k, len;
+	cpumask_t tmp = irq_affinity[(long)data];
+
+	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%s%08lx\n", irq_redir[(unsigned long)data] ? "r " : "",
-			irq_affinity[(unsigned long)data]);
+
+	len = 0;
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int irq_affinity_write_proc (struct file *file, const char *buffer,
@@ -967,7 +984,7 @@ static int irq_affinity_write_proc (stru
 {
 	unsigned int irq = (unsigned long) data;
 	int full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 	const char *buf = buffer;
 	irq_desc_t *desc = irq_descp(irq);
 	int redir;
@@ -991,7 +1008,8 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	desc->handler->set_affinity(irq | (redir? IA64_IRQ_REDIRECTED : 0), new_value);
@@ -1003,18 +1021,28 @@ static int irq_affinity_write_proc (stru
 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
+	cpumask_t *mask = (cpumask_t *)data, tmp = *mask;
+	int k, len = 0;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", *mask);
+
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
-	int full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data;
+	unsigned long full_count = count, err;
+	cpumask_t new_value;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
@@ -1058,7 +1086,7 @@ static void register_irq_proc (unsigned 
 #endif
 }
 
-unsigned long prof_cpu_mask = -1;
+cpumask_t prof_cpu_mask = CPU_MASK_ALL;
 
 void init_irq_proc (void)
 {
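
Editor's note: the new /proc read side emits the mask 16 bits at a time with "%04hx", least-significant chunk first, while parse_hex_value() still consumes a conventional hex number, most-significant nibble first; the two formats only coincide while the mask fits in one 16-bit chunk. A hypothetical userspace model of the pair, assuming a 64-bit unsigned long:

#include <stdio.h>
#include <string.h>
#include <assert.h>

static void print_mask(char *buf, unsigned long mask)
{
	int k;

	for (k = 0; k < 4; ++k) {	/* 4 u16 chunks in 64 bits */
		buf += sprintf(buf, "%04hx", (unsigned short)mask);
		mask >>= 16;		/* cpus_shift_right(tmp, tmp, 16) */
	}
}

static unsigned long parse_mask(const char *buf)
{
	unsigned long value = 0;

	for (; *buf; ++buf) {
		unsigned int c = *buf;

		if (c >= '0' && c <= '9')      c -= '0';
		else if (c >= 'a' && c <= 'f') c -= 'a' - 10;
		else break;
		value = (value << 4) | c;	/* one nibble per step */
	}
	return value;
}

int main(void)
{
	char buf[32];

	print_mask(buf, 0xdeadUL);
	/* note: chunks come out low word first */
	assert(strcmp(buf, "dead000000000000") == 0);
	assert(parse_mask("dead") == 0xdeadUL);
	return 0;
}
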
diff -puN arch/ia64/kernel/perfmon.c~cpumask_t-1 arch/ia64/kernel/perfmon.c
--- 25/arch/ia64/kernel/perfmon.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/perfmon.c	2003-07-03 01:19:48.000000000 -0700
@@ -221,14 +221,6 @@
 
 #define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
 
-#ifdef CONFIG_SMP
-#define PFM_CPU_ONLINE_MAP	cpu_online_map
-#define cpu_is_online(i)	(PFM_CPU_ONLINE_MAP & (1UL << i))
-#else
-#define PFM_CPU_ONLINE_MAP	 1UL
-#define cpu_is_online(i)	(i==0)
-#endif
-
 /*
  * cmp0 must be the value of pmc0
  */
@@ -5354,7 +5346,7 @@ pfm_proc_info(char *page)
 		p += sprintf(p, "ovfl_mask                 : 0x%lx\n", pmu_conf.ovfl_val);
 
 	for(i=0; i < NR_CPUS; i++) {
-		if (cpu_is_online(i) == 0) continue;
+		if (cpu_online(i) == 0) continue;
 		p += sprintf(p, "CPU%-2d overflow intrs      : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
 		p += sprintf(p, "CPU%-2d overflow cycles     : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles);
 		p += sprintf(p, "CPU%-2d overflow min        : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles_min);
@@ -5372,7 +5364,7 @@ pfm_proc_info(char *page)
 		p += sprintf(p, "CPU%-2d activations         : %lu\n", i, pfm_get_cpu_data(pmu_activation_number,i));
 	}
 
-	if (hweight64(PFM_CPU_ONLINE_MAP) == 1)
+	if (num_online_cpus() == 1)
 	{
 		psr = pfm_get_psr();
 		ia64_srlz_d();
diff -puN arch/ia64/kernel/setup.c~cpumask_t-1 arch/ia64/kernel/setup.c
--- 25/arch/ia64/kernel/setup.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/setup.c	2003-07-03 01:19:48.000000000 -0700
@@ -558,7 +558,7 @@ static void *
 c_start (struct seq_file *m, loff_t *pos)
 {
 #ifdef CONFIG_SMP
-	while (*pos < NR_CPUS && !(cpu_online_map & (1UL << *pos)))
+	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
 		++*pos;
 #endif
 	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
diff -puN arch/ia64/kernel/smpboot.c~cpumask_t-1 arch/ia64/kernel/smpboot.c
--- 25/arch/ia64/kernel/smpboot.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/smpboot.c	2003-07-03 01:19:48.000000000 -0700
@@ -79,13 +79,13 @@ int cpucount;
 task_t *task_for_booting_cpu;
 
 /* Bitmask of currently online CPUs */
-volatile unsigned long cpu_online_map;
-unsigned long phys_cpu_present_map;
+cpumask_t cpu_online_map;
+cpumask_t phys_cpu_present_map;
 
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
 
-static volatile unsigned long cpu_callin_map;
+static volatile cpumask_t cpu_callin_map;
 
 struct smp_boot_data smp_boot_data __initdata;
 
@@ -282,7 +282,7 @@ smp_callin (void)
 	cpuid = smp_processor_id();
 	phys_id = hard_smp_processor_id();
 
-	if (test_and_set_bit(cpuid, &cpu_online_map)) {
+	if (cpu_test_and_set(cpuid, cpu_online_map)) {
 		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
 		       phys_id, cpuid);
 		BUG();
@@ -327,7 +327,7 @@ smp_callin (void)
 	/*
 	 * Allow the master to continue.
 	 */
-	set_bit(cpuid, &cpu_callin_map);
+	cpu_set(cpuid, cpu_callin_map);
 	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
 }
 
@@ -391,19 +391,19 @@ do_boot_cpu (int sapicid, int cpu)
 	 */
 	Dprintk("Waiting on callin_map ...");
 	for (timeout = 0; timeout < 100000; timeout++) {
-		if (test_bit(cpu, &cpu_callin_map))
+		if (cpu_isset(cpu, cpu_callin_map))
 			break;  /* It has booted */
 		udelay(100);
 	}
 	Dprintk("\n");
 
-	if (test_bit(cpu, &cpu_callin_map)) {
+	if (cpu_isset(cpu, cpu_callin_map)) {
 		/* number CPUs logically, starting from 1 (BSP is 0) */
 		printk(KERN_INFO "CPU%d: CPU has booted.\n", cpu);
 	} else {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
-		clear_bit(cpu, &cpu_online_map);  /* was set in smp_callin() */
+		cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
 		return -EINVAL;
 	}
 	return 0;
@@ -446,13 +446,14 @@ smp_build_cpu_map (void)
 		ia64_cpu_to_sapicid[cpu] = -1;
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
-	phys_cpu_present_map = 1;
+	cpus_clear(phys_cpu_present_map);
+	cpu_set(0, phys_cpu_present_map);
 
 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
 		sapicid = smp_boot_data.cpu_phys_id[i];
 		if (sapicid == boot_cpu_id)
 			continue;
-		phys_cpu_present_map |= (1UL << cpu);
+		cpu_set(cpu, phys_cpu_present_map);
 		ia64_cpu_to_sapicid[cpu] = sapicid;
 		cpu++;
 	}
@@ -463,7 +464,7 @@ smp_build_cpu_map (void)
 /* on which node is each logical CPU (one cacheline even for 64 CPUs) */
 volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 /* which logical CPUs are on which nodes */
-volatile unsigned long node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+volatile cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 
 /*
  * Build cpu to node mapping and initialize the per node cpu masks.
@@ -474,7 +475,7 @@ build_cpu_to_node_map (void)
 	int cpu, i, node;
 
 	for(node=0; node<MAX_NUMNODES; node++)
-		node_to_cpu_mask[node] = 0;
+		cpus_clear(node_to_cpu_mask[node]);
 	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
 		/*
 		 * All Itanium NUMA platforms I know use ACPI, so maybe we
@@ -492,7 +493,7 @@ build_cpu_to_node_map (void)
 #endif
 		cpu_to_node_map[cpu] = node;
 		if (node >= 0)
-			node_to_cpu_mask[node] |= (1UL << cpu);
+			cpu_set(cpu, node_to_cpu_mask[node]);
 	}
 }
 
@@ -515,8 +516,8 @@ smp_prepare_cpus (unsigned int max_cpus)
 	/*
 	 * We have the boot CPU online for sure.
 	 */
-	set_bit(0, &cpu_online_map);
-	set_bit(0, &cpu_callin_map);
+	cpu_set(0, cpu_online_map);
+	cpu_set(0, cpu_callin_map);
 
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -531,15 +532,18 @@ smp_prepare_cpus (unsigned int max_cpus)
 	 */
 	if (!max_cpus) {
 		printk(KERN_INFO "SMP mode deactivated.\n");
-		cpu_online_map = phys_cpu_present_map = 1;
+		cpus_clear(cpu_online_map);
+		cpus_clear(phys_cpu_present_map);
+		cpu_set(0, cpu_online_map);
+		cpu_set(0, phys_cpu_present_map);
 		return;
 	}
 }
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_callin_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_callin_map);
 }
 
 void
diff -puN arch/ia64/kernel/smp.c~cpumask_t-1 arch/ia64/kernel/smp.c
--- 25/arch/ia64/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -81,7 +81,7 @@ stop_this_cpu (void)
 	/*
 	 * Remove this CPU:
 	 */
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	max_xtp();
 	local_irq_disable();
 	cpu_halt();
diff -puN arch/ia64/kernel/time.c~cpumask_t-1 arch/ia64/kernel/time.c
--- 25/arch/ia64/kernel/time.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/time.c	2003-07-03 01:19:48.000000000 -0700
@@ -40,13 +40,13 @@ unsigned long last_cli_ip;
 static void
 do_profile (unsigned long ip)
 {
-	extern unsigned long prof_cpu_mask;
+	extern cpumask_t prof_cpu_mask;
 	extern char _stext;
 
 	if (!prof_buffer)
 		return;
 
-	if (!((1UL << smp_processor_id()) & prof_cpu_mask))
+	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
 		return;
 
 	ip -= (unsigned long) &_stext;
diff -puN arch/mips64/kernel/irq.c~cpumask_t-1 arch/mips64/kernel/irq.c
--- 25/arch/mips64/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/mips64/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -818,13 +818,13 @@ EXPORT_SYMBOL(probe_irq_mask);
 static struct proc_dir_entry * root_irq_dir;
 static struct proc_dir_entry * irq_dir [NR_IRQS];
 
-#define HEX_DIGITS 8
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
 static unsigned int parse_hex_value (const char *buffer,
-		unsigned long count, unsigned long *ret)
+		unsigned long count, cpumask_t *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	cpumask_t value = CPU_MASK_NONE;
 	int i;
 
 	if (!count)
@@ -838,10 +838,9 @@ static unsigned int parse_hex_value (con
 	 * Parse the first 8 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
 
 	for (i = 0; i < count; i++) {
-		unsigned int c = hexnum[i];
+		unsigned int k, c = hexnum[i];
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -850,7 +849,10 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (c & (1 << k))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -861,20 +863,31 @@ out:
 
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	int k, len = 0;
+	cpumask_t tmp = irq_affinity[(long)data];
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
+
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int irq_affinity_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
 	int irq = (long) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -886,7 +899,8 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	irq_affinity[irq] = new_value;
@@ -900,17 +914,28 @@ static int irq_affinity_write_proc (stru
 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
+	int k, len = 0;
+	cpumask_t *mask = (cpumask_t *)data, tmp;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", *mask);
+
+	tmp = *mask;
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	unsigned long full_count = count, err;
+	cpumask_t new_value, *mask = (cpumask_t *)data;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
diff -puN arch/mips64/kernel/proc.c~cpumask_t-1 arch/mips64/kernel/proc.c
--- 25/arch/mips64/kernel/proc.c~cpumask_t-1	2003-07-03 01:19:47.000000000 -0700
+++ 25-akpm/arch/mips64/kernel/proc.c	2003-07-03 01:19:48.000000000 -0700
@@ -81,7 +81,7 @@ static int show_cpuinfo(struct seq_file 
 	char fmt [64];
 
 #ifdef CONFIG_SMP
-	if (!CPUMASK_TSTB(cpu_online_map, n))
+	if (!cpu_isset(n, cpu_online_map))
 		return 0;
 #endif
 
diff -puN arch/mips64/kernel/smp.c~cpumask_t-1 arch/mips64/kernel/smp.c
--- 25/arch/mips64/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/mips64/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -146,7 +146,7 @@ asmlinkage void start_secondary(void)
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 	prom_smp_finish();
 	printk("Slave cpu booted successfully\n");
-	CPUMASK_SETB(cpu_online_map, cpu);
+	cpu_set(cpu, cpu_online_map);
 	atomic_inc(&cpus_booted);
 	cpu_idle();
 }
@@ -250,7 +250,7 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_enable();	/* May need to service _machine_restart IPI */
 	for (;;);		/* Wait if available. */
 }
diff -puN arch/mips/kernel/irq.c~cpumask_t-1 arch/mips/kernel/irq.c
--- 25/arch/mips/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/mips/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -861,20 +861,30 @@ out:
 
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	int len = 0, k;
+	cpumask_t tmp = irq_affinity[(long)data];
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int irq_affinity_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
 	int irq = (long) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -886,7 +896,8 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	irq_affinity[irq] = new_value;
@@ -900,17 +911,28 @@ static int irq_affinity_write_proc (stru
 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
+	int len = 0, k;
+	cpumask_t *mask = (cpumask_t *)data, tmp;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", *mask);
+	tmp = *mask;
+
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data, new_value;
+	unsigned long full_count = count, err;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
diff -puN arch/mips/kernel/proc.c~cpumask_t-1 arch/mips/kernel/proc.c
--- 25/arch/mips/kernel/proc.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/mips/kernel/proc.c	2003-07-03 01:19:48.000000000 -0700
@@ -81,7 +81,7 @@ static int show_cpuinfo(struct seq_file 
 	char fmt [64];
 
 #ifdef CONFIG_SMP
-	if (!CPUMASK_TSTB(cpu_online_map, n))
+	if (!cpu_isset(n, cpu_online_map))
 		return 0;
 #endif
 
diff -puN arch/mips/kernel/smp.c~cpumask_t-1 arch/mips/kernel/smp.c
--- 25/arch/mips/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/mips/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -146,7 +146,7 @@ asmlinkage void start_secondary(void)
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 	prom_smp_finish();
 	printk("Slave cpu booted successfully\n");
-	CPUMASK_SETB(cpu_online_map, cpu);
+	cpu_set(cpu, cpu_online_map);
 	atomic_inc(&cpus_booted);
 	cpu_idle();
 }
@@ -250,7 +250,7 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_enable();	/* May need to service _machine_restart IPI */
 	for (;;);		/* Wait if available. */
 }
diff -puN arch/mips/sgi-ip27/ip27-init.c~cpumask_t-1 arch/mips/sgi-ip27/ip27-init.c
--- 25/arch/mips/sgi-ip27/ip27-init.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/mips/sgi-ip27/ip27-init.c	2003-07-03 01:19:48.000000000 -0700
@@ -481,7 +481,7 @@ static int __init do_boot_cpu(int cpu, i
 	 */
 	__cpu_number_map[cpu] = num_cpus;
 	__cpu_logical_map[num_cpus] = cpu;
-	CPUMASK_SETB(cpu_online_map, cpu);
+	cpu_set(cpu, cpu_online_map);
 
 	/*
 	 * Wait this cpu to start up and initialize its hub,
diff -puN arch/mips/sibyte/cfe/smp.c~cpumask_t-1 arch/mips/sibyte/cfe/smp.c
--- 25/arch/mips/sibyte/cfe/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/mips/sibyte/cfe/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -63,7 +63,7 @@ void prom_setup_smp(void)
 	/* Use CFE to find out how many CPUs are available */
 	for (i=1; i<NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			CPUMASK_SETB(cpu_online_map, i);
+			cpu_set(i, cpu_online_map);
 		}
 	}
 	printk("Detected %i available CPU(s)\n", num_online_cpus());
diff -puN arch/mips/sibyte/sb1250/smp.c~cpumask_t-1 arch/mips/sibyte/sb1250/smp.c
--- 25/arch/mips/sibyte/sb1250/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/mips/sibyte/sb1250/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -107,8 +107,8 @@ void __init smp_boot_cpus(void)
 	current_thread_info()->cpu = 0;
 	cpu_data[0].udelay_val = loops_per_jiffy;
 	cpu_data[0].asid_cache = ASID_FIRST_VERSION;
-	CPUMASK_CLRALL(cpu_online_map);
-	CPUMASK_SETB(cpu_online_map, 0);
+	cpus_clear(cpu_online_map);
+	cpu_set(0, cpu_online_map);
 	atomic_set(&cpus_booted, 1);  /* Master CPU is already booted... */
 	smp_tune_scheduling();
 
diff -puN arch/parisc/kernel/smp.c~cpumask_t-1 arch/parisc/kernel/smp.c
--- 25/arch/parisc/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/parisc/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -62,14 +62,14 @@ volatile struct task_struct *smp_init_cu
 
 static volatile int smp_commenced = 0;   /* Set when the idlers are all forked */
 static volatile int cpu_now_booting = 0;      /* track which CPU is booting */
-volatile unsigned long cpu_online_map = 0;   /* Bitmap of online CPUs */
-#define IS_LOGGED_IN(cpunum) (test_bit(cpunum, (atomic_t *)&cpu_online_map))
+cpumask_t cpu_online_map = CPU_MASK_NONE;   /* Bitmap of online CPUs */
+#define IS_LOGGED_IN(cpunum) (cpu_isset(cpunum, cpu_online_map))
 
 int smp_num_cpus = 1;
 int smp_threads_ready = 0;
 unsigned long cache_decay_ticks;
 static int max_cpus = -1;			     /* Command line */
-unsigned long cpu_present_mask;
+cpumask_t cpu_present_mask;
 
 struct smp_call_struct {
 	void (*func) (void *info);
@@ -139,7 +139,7 @@ halt_processor(void) 
 #else
 	/* REVISIT : redirect I/O Interrupts to another CPU? */
 	/* REVISIT : does PM *know* this CPU isn't available? */
-	clear_bit(smp_processor_id(), (void *)&cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	for (;;)
 		;
@@ -443,7 +443,7 @@ smp_cpu_init(int cpunum)
 	mb();
 
 	/* Well, support 2.4 linux scheme as well. */
-	if (test_and_set_bit(cpunum, (unsigned long *) (&cpu_online_map)))
+	if (cpu_test_and_set(cpunum, cpu_online_map))
 	{
 		extern void machine_halt(void); /* arch/parisc.../process.c */
 
@@ -624,13 +624,14 @@ void __init smp_boot_cpus(void)
 	printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
 	init_task.thread_info->cpu = bootstrap_processor; 
 	current->thread_info->cpu = bootstrap_processor;
-	cpu_online_map = 1 << bootstrap_processor; /* Mark Boostrap processor as present */
+	/* Mark bootstrap processor as present */
+	cpu_online_map = cpumask_of_cpu(bootstrap_processor);
 	current->active_mm = &init_mm;
 
 #ifdef ENTRY_SYS_CPUS
 	cpu_data[0].state = STATE_RUNNING;
 #endif
-	cpu_present_mask = 1UL << bootstrap_processor;
+	cpu_present_mask = cpumask_of_cpu(bootstrap_processor);
 
 	/* Nothing to do when told not to.  */
 	if (max_cpus == 0) {
@@ -709,8 +710,8 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_present_mask);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_present_mask);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
diff -puN arch/ppc64/Kconfig~cpumask_t-1 arch/ppc64/Kconfig
--- 25/arch/ppc64/Kconfig~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/Kconfig	2003-07-03 01:19:48.000000000 -0700
@@ -93,7 +93,7 @@ config IRQ_ALL_CPUS
 	  CPU.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-64)"
+	int "Maximum number of CPUs (2-128)"
 	depends on SMP
 	default "32"
 
diff -puN arch/ppc64/kernel/htab.c~cpumask_t-1 arch/ppc64/kernel/htab.c
--- 25/arch/ppc64/kernel/htab.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/htab.c	2003-07-03 01:19:48.000000000 -0700
@@ -377,6 +377,7 @@ int hash_page(unsigned long ea, unsigned
 	int ret;
 	int user_region = 0;
 	int local = 0;
+	cpumask_t tmp;
 
 	/* Check for invalid addresses. */
 	if (!IS_VALID_EA(ea))
@@ -431,7 +432,8 @@ int hash_page(unsigned long ea, unsigned
 	 */
 	spin_lock(&mm->page_table_lock);
 
-	if (user_region && (mm->cpu_vm_mask == (1 << smp_processor_id())))
+	tmp = cpumask_of_cpu(smp_processor_id());
+	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
 		local = 1;
 
 	ptep = find_linux_pte(pgdir, ea);
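
Editor's note: hash_page()'s locality test, formerly mm->cpu_vm_mask == (1 << smp_processor_id()), becomes a cpus_equal() against cpumask_of_cpu(). A single-word sketch of that test (hypothetical userspace code, not the kernel helpers):

#include <assert.h>

typedef struct { unsigned long bits[1]; } cpumask_t;

static cpumask_t cpumask_of_cpu(int cpu)
{
	cpumask_t m = { { 1UL << cpu } };
	return m;
}

static int cpus_equal(cpumask_t a, cpumask_t b)
{
	return a.bits[0] == b.bits[0];
}

int main(void)
{
	cpumask_t vm_mask = cpumask_of_cpu(3);	/* mm live on CPU 3 only */
	cpumask_t tmp = cpumask_of_cpu(3);	/* cpumask_of_cpu(smp_processor_id()) */

	/* local == 1: the hash update can skip the cross-CPU path */
	assert(cpus_equal(vm_mask, tmp));

	vm_mask.bits[0] |= 1UL << 5;		/* a second CPU touched the mm */
	assert(!cpus_equal(vm_mask, tmp));	/* local == 0 */
	return 0;
}
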
diff -puN arch/ppc64/kernel/irq.c~cpumask_t-1 arch/ppc64/kernel/irq.c
--- 25/arch/ppc64/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -603,26 +603,37 @@ static struct proc_dir_entry * irq_dir [
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
 #ifdef CONFIG_IRQ_ALL_CPUS
-unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = -1UL};
+cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 #else  /* CONFIG_IRQ_ALL_CPUS */
-unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x0};
+cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_NONE };
 #endif /* CONFIG_IRQ_ALL_CPUS */
 
-#define HEX_DIGITS 16
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	int k, len = 0;
+	cpumask_t tmp = irq_affinity[(long)data];
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf(page, "%16lx\n", irq_affinity[(long)data]);
+
+	for (k = 0; k < sizeof(cpumask_t) / sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static unsigned int parse_hex_value (const char *buffer,
-		unsigned long count, unsigned long *ret)
+		unsigned long count, cpumask_t *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	cpumask_t value = CPU_MASK_NONE;
 	int i;
 
 	if (!count)
@@ -636,10 +647,10 @@ static unsigned int parse_hex_value (con
 	 * Parse the first 16 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
 
 	for (i = 0; i < count; i++) {
 		unsigned int c = hexnum[i];
+		int k;
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -648,7 +659,11 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (c & (1 << k))
+				cpu_set(k, value);
+
 	}
 out:
 	*ret = value;
@@ -659,7 +674,7 @@ static int irq_affinity_write_proc (stru
 					unsigned long count, void *data)
 {
 	int irq = (long)data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -671,7 +686,8 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	irq_affinity[irq] = new_value;
@@ -692,8 +708,9 @@ static int prof_cpu_mask_read_proc (char
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data;
+	unsigned long full_count = count, err;
+	cpumask_t new_value;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
diff -puN arch/ppc64/kernel/open_pic.c~cpumask_t-1 arch/ppc64/kernel/open_pic.c
--- 25/arch/ppc64/kernel/open_pic.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/open_pic.c	2003-07-03 01:19:48.000000000 -0700
@@ -46,7 +46,7 @@ static int broken_ipi_registers;
 OpenPIC_SourcePtr ISU[OPENPIC_MAX_ISU];
 
 static void openpic_end_irq(unsigned int irq_nr);
-static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask);
+static void openpic_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
 
 struct hw_interrupt_type open_pic = {
 	" OpenPIC  ",
@@ -505,7 +505,7 @@ static void openpic_set_spurious(u_int v
 void openpic_init_processor(u_int cpumask)
 {
 	openpic_write(&OpenPIC->Global.Processor_Initialization,
-		      cpumask & cpu_online_map);
+		      cpumask & cpus_coerce(cpu_online_map));
 }
 
 #ifdef CONFIG_SMP
@@ -539,7 +539,7 @@ void openpic_cause_IPI(u_int ipi, u_int 
 	CHECK_THIS_CPU;
 	check_arg_ipi(ipi);
 	openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
-		      cpumask & cpu_online_map);
+		      cpumask & cpus_coerce(cpu_online_map));
 }
 
 void openpic_request_IPIs(void)
@@ -625,7 +625,7 @@ static void __init openpic_maptimer(u_in
 {
 	check_arg_timer(timer);
 	openpic_write(&OpenPIC->Global.Timer[timer].Destination,
-		      cpumask & cpu_online_map);
+		      cpumask & cpus_coerce(cpu_online_map));
 }
 
 
@@ -746,9 +746,12 @@ static void openpic_end_irq(unsigned int
 		openpic_eoi();
 }
 
-static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask)
+static void openpic_set_affinity(unsigned int irq_nr, cpumask_t cpumask)
 {
-	openpic_mapirq(irq_nr - open_pic_irq_offset, cpumask & cpu_online_map);
+	cpumask_t tmp;
+
+	cpus_and(tmp, cpumask, cpu_online_map);
+	openpic_mapirq(irq_nr - open_pic_irq_offset, cpus_coerce(tmp));
 }
 
 #ifdef CONFIG_SMP
diff -puN arch/ppc64/kernel/open_pic.h~cpumask_t-1 arch/ppc64/kernel/open_pic.h
--- 25/arch/ppc64/kernel/open_pic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/open_pic.h	2003-07-03 01:19:48.000000000 -0700
@@ -13,6 +13,7 @@
 #define _PPC64_KERNEL_OPEN_PIC_H
 
 #include <linux/config.h>
+#include <linux/cpumask.h>
 
 #define OPENPIC_SIZE	0x40000
 
diff -puN arch/ppc64/kernel/pacaData.c~cpumask_t-1 arch/ppc64/kernel/pacaData.c
--- 25/arch/ppc64/kernel/pacaData.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/pacaData.c	2003-07-03 01:19:48.000000000 -0700
@@ -134,5 +134,71 @@ struct paca_struct paca[NR_CPUS] __page_
 	PACAINITDATA(61, 0, 0, 0, 0),
 	PACAINITDATA(62, 0, 0, 0, 0),
 	PACAINITDATA(63, 0, 0, 0, 0),
+#if NR_CPUS > 64
+	PACAINITDATA(64, 0, 0, 0, 0),
+	PACAINITDATA(65, 0, 0, 0, 0),
+	PACAINITDATA(66, 0, 0, 0, 0),
+	PACAINITDATA(67, 0, 0, 0, 0),
+	PACAINITDATA(68, 0, 0, 0, 0),
+	PACAINITDATA(69, 0, 0, 0, 0),
+	PACAINITDATA(70, 0, 0, 0, 0),
+	PACAINITDATA(71, 0, 0, 0, 0),
+	PACAINITDATA(72, 0, 0, 0, 0),
+	PACAINITDATA(73, 0, 0, 0, 0),
+	PACAINITDATA(74, 0, 0, 0, 0),
+	PACAINITDATA(75, 0, 0, 0, 0),
+	PACAINITDATA(76, 0, 0, 0, 0),
+	PACAINITDATA(77, 0, 0, 0, 0),
+	PACAINITDATA(78, 0, 0, 0, 0),
+	PACAINITDATA(79, 0, 0, 0, 0),
+	PACAINITDATA(80, 0, 0, 0, 0),
+	PACAINITDATA(81, 0, 0, 0, 0),
+	PACAINITDATA(82, 0, 0, 0, 0),
+	PACAINITDATA(83, 0, 0, 0, 0),
+	PACAINITDATA(84, 0, 0, 0, 0),
+	PACAINITDATA(85, 0, 0, 0, 0),
+	PACAINITDATA(86, 0, 0, 0, 0),
+	PACAINITDATA(87, 0, 0, 0, 0),
+	PACAINITDATA(88, 0, 0, 0, 0),
+	PACAINITDATA(89, 0, 0, 0, 0),
+	PACAINITDATA(90, 0, 0, 0, 0),
+	PACAINITDATA(91, 0, 0, 0, 0),
+	PACAINITDATA(92, 0, 0, 0, 0),
+	PACAINITDATA(93, 0, 0, 0, 0),
+	PACAINITDATA(94, 0, 0, 0, 0),
+	PACAINITDATA(95, 0, 0, 0, 0),
+	PACAINITDATA(96, 0, 0, 0, 0),
+	PACAINITDATA(97, 0, 0, 0, 0),
+	PACAINITDATA(98, 0, 0, 0, 0),
+	PACAINITDATA(99, 0, 0, 0, 0),
+	PACAINITDATA(100, 0, 0, 0, 0),
+	PACAINITDATA(101, 0, 0, 0, 0),
+	PACAINITDATA(102, 0, 0, 0, 0),
+	PACAINITDATA(103, 0, 0, 0, 0),
+	PACAINITDATA(104, 0, 0, 0, 0),
+	PACAINITDATA(105, 0, 0, 0, 0),
+	PACAINITDATA(106, 0, 0, 0, 0),
+	PACAINITDATA(107, 0, 0, 0, 0),
+	PACAINITDATA(108, 0, 0, 0, 0),
+	PACAINITDATA(109, 0, 0, 0, 0),
+	PACAINITDATA(110, 0, 0, 0, 0),
+	PACAINITDATA(111, 0, 0, 0, 0),
+	PACAINITDATA(112, 0, 0, 0, 0),
+	PACAINITDATA(113, 0, 0, 0, 0),
+	PACAINITDATA(114, 0, 0, 0, 0),
+	PACAINITDATA(115, 0, 0, 0, 0),
+	PACAINITDATA(116, 0, 0, 0, 0),
+	PACAINITDATA(117, 0, 0, 0, 0),
+	PACAINITDATA(118, 0, 0, 0, 0),
+	PACAINITDATA(119, 0, 0, 0, 0),
+	PACAINITDATA(120, 0, 0, 0, 0),
+	PACAINITDATA(121, 0, 0, 0, 0),
+	PACAINITDATA(122, 0, 0, 0, 0),
+	PACAINITDATA(123, 0, 0, 0, 0),
+	PACAINITDATA(124, 0, 0, 0, 0),
+	PACAINITDATA(125, 0, 0, 0, 0),
+	PACAINITDATA(126, 0, 0, 0, 0),
+	PACAINITDATA(127, 0, 0, 0, 0),
+#endif
 #endif
 };
diff -puN arch/ppc64/kernel/prom.c~cpumask_t-1 arch/ppc64/kernel/prom.c
--- 25/arch/ppc64/kernel/prom.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/prom.c	2003-07-03 01:19:48.000000000 -0700
@@ -1134,7 +1134,7 @@ prom_init(unsigned long r3, unsigned lon
 	_prom->cpu = (int)(unsigned long)getprop_rval;
 	_xPaca[_prom->cpu].active = 1;
 #ifdef CONFIG_SMP
-	RELOC(cpu_online_map) = 1UL << _prom->cpu;
+	cpu_set(_prom->cpu, RELOC(cpu_online_map));
 #endif
 	RELOC(boot_cpuid) = _prom->cpu;
 
diff -puN arch/ppc64/kernel/rtasd.c~cpumask_t-1 arch/ppc64/kernel/rtasd.c
--- 25/arch/ppc64/kernel/rtasd.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/rtasd.c	2003-07-03 01:19:48.000000000 -0700
@@ -225,7 +225,7 @@ repeat:
 			continue;
 
 		DEBUG("scheduling on %d\n", cpu);
-		set_cpus_allowed(current, 1UL << cpu);
+		set_cpus_allowed(current, cpumask_of_cpu(cpu));
 		DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());
 
 		do {
diff -puN arch/ppc64/kernel/setup.c~cpumask_t-1 arch/ppc64/kernel/setup.c
--- 25/arch/ppc64/kernel/setup.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/setup.c	2003-07-03 01:19:48.000000000 -0700
@@ -256,7 +256,7 @@ static int show_cpuinfo(struct seq_file 
 		return 0;
 	}
 
-	if (!(cpu_online_map & (1UL << cpu_id)))
+	if (!cpu_online(cpu_id))
 		return 0;
 
 #ifdef CONFIG_SMP
diff -puN arch/ppc64/kernel/smp.c~cpumask_t-1 arch/ppc64/kernel/smp.c
--- 25/arch/ppc64/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -53,7 +53,7 @@ int smp_threads_ready;
 unsigned long cache_decay_ticks;
 
 /* initialised so it doesn't end up in bss */
-unsigned long cpu_online_map = 0;
+cpumask_t cpu_online_map = CPU_MASK_NONE;
 
 static struct smp_ops_t *smp_ops;
 
@@ -570,7 +570,7 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
 	/* FIXME: what about cpu_possible()? */
 }
 
@@ -631,7 +631,7 @@ int __devinit __cpu_up(unsigned int cpu)
 
 	if (smp_ops->give_timebase)
 		smp_ops->give_timebase();
-	set_bit(cpu, &cpu_online_map);
+	cpu_set(cpu, cpu_online_map);
 	return 0;
 }
 
diff -puN arch/ppc64/kernel/xics.c~cpumask_t-1 arch/ppc64/kernel/xics.c
--- 25/arch/ppc64/kernel/xics.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/kernel/xics.c	2003-07-03 01:19:48.000000000 -0700
@@ -33,7 +33,7 @@ void xics_enable_irq(u_int irq);
 void xics_disable_irq(u_int irq);
 void xics_mask_and_ack_irq(u_int irq);
 void xics_end_irq(u_int irq);
-void xics_set_affinity(unsigned int irq_nr, unsigned long cpumask);
+void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
 
 struct hw_interrupt_type xics_pic = {
 	" XICS     ",
@@ -508,7 +508,7 @@ nextnode:
 	ppc64_boot_msg(0x21, "XICS Done");
 }
 
-void xics_set_affinity(unsigned int virq, unsigned long cpumask)
+void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
 {
         irq_desc_t *desc = irq_desc + virq;
 	unsigned int irq;
@@ -516,6 +516,8 @@ void xics_set_affinity(unsigned int virq
 	long status;
 	unsigned long xics_status[2];
 	unsigned long newmask;
+	cpumask_t allcpus = CPU_MASK_ALL;
+	cpumask_t tmp = CPU_MASK_NONE;
 
 	virq -= XICS_IRQ_OFFSET;
 	irq = virt_irq_to_real(virq);
@@ -533,12 +535,13 @@ void xics_set_affinity(unsigned int virq
 	}
 
 	/* For the moment only implement delivery to all cpus or one cpu */
-	if (cpumask == -1UL) {
+	if (cpus_equal(cpumask, allcpus)) {
 		newmask = default_distrib_server;
 	} else {
-		if (!(cpumask & cpu_online_map))
+		cpus_and(tmp, cpu_online_map, cpumask);
+		if (cpus_empty(tmp))
 			goto out;
-		newmask = find_first_bit(&cpumask, 8*sizeof(unsigned long));
+		newmask = first_cpu(cpumask);
 	}
 
 	status = rtas_call(ibm_set_xive, 3, 1, NULL,
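
Editor's note: xics_set_affinity() now distinguishes "all CPUs" from a single target by comparing against CPU_MASK_ALL and otherwise taking first_cpu(); note that, exactly as in the hunk above, the target is the first CPU of the requested mask, not of its intersection with cpu_online_map. A hypothetical single-word model (NCPUS and the distribution-server value 42 are made up):

#include <assert.h>

#define NCPUS 8
#define MASK_ALL ((1UL << NCPUS) - 1)
#define INVALID (~0UL)

static unsigned long pick_target(unsigned long cpumask, unsigned long online,
				 unsigned long distrib_server)
{
	unsigned long tmp;
	int cpu;

	if (cpumask == MASK_ALL)		/* cpus_equal(cpumask, allcpus) */
		return distrib_server;
	tmp = cpumask & online;			/* cpus_and(tmp, online, cpumask) */
	if (tmp == 0)				/* cpus_empty(tmp) -> bail out */
		return INVALID;
	for (cpu = 0; cpu < NCPUS; cpu++)	/* newmask = first_cpu(cpumask) */
		if (cpumask & (1UL << cpu))
			return (unsigned long)cpu;
	return INVALID;
}

int main(void)
{
	assert(pick_target(MASK_ALL, 0x3, 42) == 42);	/* all -> server */
	assert(pick_target(0x6, 0x4, 42) == 1);		/* first_cpu of request */
	assert(pick_target(0x8, 0x3, 42) == INVALID);	/* no online overlap */
	return 0;
}
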
diff -puN arch/ppc64/mm/init.c~cpumask_t-1 arch/ppc64/mm/init.c
--- 25/arch/ppc64/mm/init.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc64/mm/init.c	2003-07-03 01:19:48.000000000 -0700
@@ -253,7 +253,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		__flush_tlb_range(mm, mp->vm_start, mp->vm_end);
 
 	/* XXX are there races with checking cpu_vm_mask? - Anton */
-	mm->cpu_vm_mask = 0;
+	cpus_clear(mm->cpu_vm_mask);
 
 	spin_unlock(&mm->page_table_lock);
 }
@@ -270,6 +270,7 @@ flush_tlb_page(struct vm_area_struct *vm
 	pte_t *ptep;
 	pte_t pte;
 	int local = 0;
+	cpumask_t tmp;
 
 	switch( REGION_ID(vmaddr) ) {
 	case VMALLOC_REGION_ID:
@@ -283,7 +284,8 @@ flush_tlb_page(struct vm_area_struct *vm
 		context = vma->vm_mm->context;
 
 		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		if (vma->vm_mm->cpu_vm_mask == (1 << smp_processor_id()))
+		tmp = cpumask_of_cpu(smp_processor_id());
+		if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 			local = 1;
 
 		break;
@@ -319,6 +321,7 @@ __flush_tlb_range(struct mm_struct *mm, 
 	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
 	unsigned long i = 0;
 	int local = 0;
+	cpumask_t tmp;
 
 	switch(REGION_ID(start)) {
 	case VMALLOC_REGION_ID:
@@ -332,7 +335,8 @@ __flush_tlb_range(struct mm_struct *mm, 
 		context = mm->context;
 
 		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		if (mm->cpu_vm_mask == (1 << smp_processor_id()))
+		tmp = cpumask_of_cpu(smp_processor_id());
+		if (cpus_equal(mm->cpu_vm_mask, tmp))
 			local = 1;
 
 		break;
@@ -698,6 +702,7 @@ void update_mmu_cache(struct vm_area_str
 	void *pgdir;
 	pte_t *ptep;
 	int local = 0;
+	cpumask_t tmp;
 
 	/* handle i-cache coherency */
 	if (!(cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)) {
@@ -723,7 +728,8 @@ void update_mmu_cache(struct vm_area_str
 	ptep = find_linux_pte(pgdir, ea);
 	vsid = get_vsid(vma->vm_mm->context, ea);
 
-	if (vma->vm_mm->cpu_vm_mask == (1 << smp_processor_id()))
+	tmp = cpumask_of_cpu(smp_processor_id());
+	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
 	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
diff -puN arch/ppc/kernel/irq.c~cpumask_t-1 arch/ppc/kernel/irq.c
--- 25/arch/ppc/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -44,6 +44,7 @@
 #include <linux/proc_fs.h>
 #include <linux/random.h>
 #include <linux/seq_file.h>
+#include <linux/cpumask.h>
 
 #include <asm/uaccess.h>
 #include <asm/bitops.h>
@@ -565,29 +566,41 @@ static struct proc_dir_entry *irq_dir[NR
 static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
 
 #ifdef CONFIG_IRQ_ALL_CPUS
-#define DEFAULT_CPU_AFFINITY 0xffffffff
+#define DEFAULT_CPU_AFFINITY CPU_MASK_ALL
 #else
-#define DEFAULT_CPU_AFFINITY 0x00000001
+#define DEFAULT_CPU_AFFINITY cpumask_of_cpu(0)
 #endif
 
-unsigned int irq_affinity [NR_IRQS] =
+cpumask_t irq_affinity [NR_IRQS] =
 	{ [0 ... NR_IRQS-1] = DEFAULT_CPU_AFFINITY };
 
-#define HEX_DIGITS 8
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	cpumask_t tmp = irq_affinity[(long)data];
+	int k, len = 0;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08x\n", irq_affinity[(int)data]);
+
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static unsigned int parse_hex_value (const char __user *buffer,
-		unsigned long count, unsigned long *ret)
+		unsigned long count, cpumask_t *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	cpumask_t value = CPU_MASK_NONE;
 	int i;
 
 	if (!count)
@@ -601,10 +614,9 @@ static unsigned int parse_hex_value (con
 	 * Parse the first 8 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
-
 	for (i = 0; i < count; i++) {
 		unsigned int c = hexnum[i];
+		int k;
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -613,7 +625,10 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (c & (1 << k))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -624,7 +639,7 @@ static int irq_affinity_write_proc (stru
 					unsigned long count, void *data)
 {
 	int irq = (int) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -641,7 +656,8 @@ static int irq_affinity_write_proc (stru
 	 * are actually logical cpu #'s then we have no problem.
 	 *  -- Cort <cort@fsmlabs.com>
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	irq_affinity[irq] = new_value;
@@ -653,17 +669,27 @@ static int irq_affinity_write_proc (stru
 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
+	cpumask_t mask = *(cpumask_t *)data;
+	int k, len = 0;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", *mask);
+
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(mask));
+		len += j;
+		page += j;
+		cpus_shift_right(mask, mask, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data, new_value;
+	unsigned long full_count = count, err;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
diff -puN arch/ppc/kernel/setup.c~cpumask_t-1 arch/ppc/kernel/setup.c
--- 25/arch/ppc/kernel/setup.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc/kernel/setup.c	2003-07-03 01:19:48.000000000 -0700
@@ -159,7 +159,7 @@ int show_cpuinfo(struct seq_file *m, voi
 	}
 
 #ifdef CONFIG_SMP
-	if (!(cpu_online_map & (1 << i)))
+	if (!cpu_online(i))
 		return 0;
 	pvr = cpu_data[i].pvr;
 	lpj = cpu_data[i].loops_per_jiffy;
diff -puN arch/ppc/kernel/smp.c~cpumask_t-1 arch/ppc/kernel/smp.c
--- 25/arch/ppc/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/ppc/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -47,7 +47,7 @@ atomic_t ipi_sent;
 DEFINE_PER_CPU(unsigned int, prof_multiplier);
 DEFINE_PER_CPU(unsigned int, prof_counter);
 unsigned long cache_decay_ticks = HZ/100;
-unsigned long cpu_online_map = 1UL;
-unsigned long cpu_possible_map = 1UL;
+cpumask_t cpu_online_map = cpumask_of_cpu(0);
+cpumask_t cpu_possible_map = cpumask_of_cpu(0);
 int smp_hw_index[NR_CPUS];
 struct thread_info *secondary_ti;
@@ -361,8 +361,8 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_possible_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_possible_map);
 }
 
 int __init setup_profiling_timer(unsigned int multiplier)
@@ -444,7 +444,7 @@ int __cpu_up(unsigned int cpu)
 	printk("Processor %d found.\n", cpu);
 
 	smp_ops->give_timebase();
-	set_bit(cpu, &cpu_online_map);
+	cpu_set(cpu, cpu_online_map);
 	return 0;
 }
 
diff -puN arch/s390/kernel/setup.c~cpumask_t-1 arch/s390/kernel/setup.c
--- 25/arch/s390/kernel/setup.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/s390/kernel/setup.c	2003-07-03 01:19:48.000000000 -0700
@@ -562,7 +562,7 @@ static int show_cpuinfo(struct seq_file 
 			       num_online_cpus(), loops_per_jiffy/(500000/HZ),
 			       (loops_per_jiffy/(5000/HZ))%100);
 	}
-	if (cpu_online_map & (1 << n)) {
+	if (cpu_online(n)) {
 #ifdef CONFIG_SMP
 		if (smp_processor_id() == n)
 			cpuinfo = &S390_lowcore.cpu_data;
diff -puN arch/s390/kernel/smp.c~cpumask_t-1 arch/s390/kernel/smp.c
--- 25/arch/s390/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/s390/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -51,8 +51,8 @@ struct _lowcore *lowcore_ptr[NR_CPUS];
 cycles_t         cacheflush_time=0;
 int              smp_threads_ready=0;      /* Set when the idlers are all forked. */
 
-volatile unsigned long cpu_online_map;
-volatile unsigned long cpu_possible_map;
+cpumask_t cpu_online_map;
+cpumask_t cpu_possible_map;
 unsigned long    cache_decay_ticks = 0;
 
 /*
@@ -200,14 +200,14 @@ void smp_send_stop(void)
 /*
  * Reboot, halt and power_off routines for SMP.
  */
-static volatile unsigned long cpu_restart_map;
+static cpumask_t cpu_restart_map;
 
 static void do_machine_restart(void * __unused)
 {
-	clear_bit(smp_processor_id(), &cpu_restart_map);
+	cpu_clear(smp_processor_id(), cpu_restart_map);
 	if (smp_processor_id() == 0) {
 		/* Wait for all other cpus to enter do_machine_restart. */
-		while (cpu_restart_map != 0);
+		while (!cpus_empty(cpu_restart_map));
 		/* Store status of other cpus. */
 		do_store_status();
 		/*
@@ -452,7 +452,7 @@ int __devinit start_secondary(void *cpuv
 	pfault_init();
 #endif
 	/* Mark this cpu as online */
-	set_bit(smp_processor_id(), &cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
 	/* Switch on interrupts */
 	local_irq_enable();
         /* Print info about this processor */
@@ -558,8 +558,8 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_possible_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_possible_map);
 }
 
 void smp_cpus_done(unsigned int max_cpus)
diff -puN arch/sparc64/kernel/irq.c~cpumask_t-1 arch/sparc64/kernel/irq.c
--- 25/arch/sparc64/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/sparc64/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -658,11 +658,11 @@ static inline void redirect_intr(int cpu
 	 *    Just Do It.
 	 */
 	struct irqaction *ap = bp->irq_info;
-	unsigned long cpu_mask = get_smpaff_in_irqaction(ap);
+	cpumask_t cpu_mask = get_smpaff_in_irqaction(ap);
 	unsigned int buddy, ticks;
 
-	cpu_mask &= cpu_online_map;
-	if (cpu_mask == 0)
+	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
+	if (cpus_empty(cpu_mask))
 		cpu_mask = cpu_online_map;
 
 	if (this_is_starfire != 0 ||
@@ -677,7 +677,7 @@ static inline void redirect_intr(int cpu
 		buddy = 0;
 
 	ticks = 0;
-	while ((cpu_mask & (1UL << buddy)) == 0) {
+	while (!cpu_isset(buddy, cpu_mask)) {
 		if (++buddy >= NR_CPUS)
 			buddy = 0;
 		if (++ticks > NR_CPUS) {
diff -puN arch/sparc64/kernel/smp.c~cpumask_t-1 arch/sparc64/kernel/smp.c
--- 25/arch/sparc64/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/sparc64/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -47,11 +47,11 @@ cpuinfo_sparc cpu_data[NR_CPUS];
 static unsigned char boot_cpu_id;
 
 atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0);
-unsigned long cpu_online_map = 0;
+cpumask_t cpu_online_map = CPU_MASK_NONE;
 atomic_t sparc64_num_cpus_possible = ATOMIC_INIT(0);
-unsigned long phys_cpu_present_map = 0;
-static unsigned long smp_commenced_mask;
-static unsigned long cpu_callout_map;
+cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+static cpumask_t smp_commenced_mask;
+static cpumask_t cpu_callout_map;
 
 void smp_info(struct seq_file *m)
 {
@@ -151,10 +151,10 @@ void __init smp_callin(void)
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 
-	while (!test_bit(cpuid, &smp_commenced_mask))
+	while (!cpu_isset(cpuid, smp_commenced_mask))
 		membar("#LoadLoad");
 
-	set_bit(cpuid, &cpu_online_map);
+	cpu_set(cpuid, cpu_online_map);
 	atomic_inc(&sparc64_num_cpus_online);
 }
 
@@ -334,7 +334,7 @@ static int __devinit smp_boot_one_cpu(un
 		if (linux_cpus[no].mid == cpu)
 			break;
 	cpu_new_thread = p->thread_info;
-	set_bit(cpu, &cpu_callout_map);
+	cpu_set(cpu, cpu_callout_map);
 	prom_startcpu(linux_cpus[no].prom_node, entry, cookie);
 	for (timeout = 0; timeout < 5000000; timeout++) {
 		if (callin_flag)
@@ -346,7 +346,7 @@ static int __devinit smp_boot_one_cpu(un
 		ret = 0;
 	} else {
 		printk("Processor %d is stuck.\n", cpu);
-		clear_bit(cpu, &cpu_callout_map);
+		cpu_clear(cpu, cpu_callout_map);
 		ret = -ENODEV;
 	}
 	cpu_new_thread = NULL;
@@ -549,12 +549,12 @@ retry:
 /* Send cross call to all processors mentioned in MASK
  * except self.
  */
-static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, unsigned long mask)
+static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
 {
 	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 
-	mask &= cpu_online_map;
-	mask &= ~(1UL<<smp_processor_id());
+	cpus_and(mask, mask, cpu_online_map);
+	cpu_clear(smp_processor_id(), mask);
 
 	if (tlb_type == spitfire)
 		spitfire_xcall_deliver(data0, data1, data2, mask);
@@ -685,14 +685,14 @@ static __inline__ void __local_flush_dca
 
 void smp_flush_dcache_page_impl(struct page *page, int cpu)
 {
-	unsigned long mask = 1UL << cpu;
+	cpumask_t mask = cpumask_of_cpu(cpu);
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 	atomic_inc(&dcpage_flushes);
 #endif
 	if (cpu == smp_processor_id()) {
 		__local_flush_dcache_page(page);
-	} else if ((cpu_online_map & mask) != 0) {
+	} else if (cpu_online(cpu)) {
 		u64 data0;
 
 		if (tlb_type == spitfire) {
@@ -719,13 +719,14 @@ void smp_flush_dcache_page_impl(struct p
 
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
-	unsigned long mask = cpu_online_map & ~(1UL << smp_processor_id());
+	cpumask_t mask = cpu_online_map;
 	u64 data0;
 
+	cpu_clear(smp_processor_id(), mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 	atomic_inc(&dcpage_flushes);
 #endif
-	if (mask == 0UL)
+	if (cpus_empty(mask))
 		goto flush_self;
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
@@ -750,9 +751,9 @@ void flush_dcache_page_all(struct mm_str
 
 void smp_receive_signal(int cpu)
 {
-	unsigned long mask = 1UL << cpu;
+	cpumask_t mask = cpumask_of_cpu(cpu);
 
-	if ((cpu_online_map & mask) != 0) {
+	if (cpu_online(cpu)) {
 		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
 
 		if (tlb_type == spitfire)
@@ -1123,7 +1124,7 @@ void __init smp_tick_init(void)
 	}
 
 	atomic_inc(&sparc64_num_cpus_online);
-	set_bit(boot_cpu_id, &cpu_online_map);
+	cpu_set(boot_cpu_id, cpu_online_map);
 	prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
 	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
@@ -1241,16 +1242,14 @@ void __init smp_prepare_cpus(unsigned in
 
 	for (i = 0; i < linux_num_cpus; i++) {
 		if (linux_cpus[i].mid < max_cpus) {
-			set_bit(linux_cpus[i].mid,
-				&phys_cpu_present_map);
+			cpu_set(linux_cpus[i].mid, phys_cpu_present_map);
 			atomic_inc(&sparc64_num_cpus_possible);
 		}
 	}
 	if (atomic_read(&sparc64_num_cpus_possible) > max_cpus) {
 		for (i = linux_num_cpus - 1; i >= 0; i--) {
 			if (linux_cpus[i].mid != boot_cpu_id) {
-				clear_bit(linux_cpus[i].mid,
-					  &phys_cpu_present_map);
+				cpu_clear(linux_cpus[i].mid, phys_cpu_present_map);
 				atomic_dec(&sparc64_num_cpus_possible);
 				if (atomic_read(&sparc64_num_cpus_possible) <= max_cpus)
 					break;
@@ -1263,8 +1262,8 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &phys_cpu_present_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), phys_cpu_present_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
@@ -1272,10 +1271,10 @@ int __devinit __cpu_up(unsigned int cpu)
 	int ret = smp_boot_one_cpu(cpu);
 
 	if (!ret) {
-		set_bit(cpu, &smp_commenced_mask);
-		while (!test_bit(cpu, &cpu_online_map))
+		cpu_set(cpu, smp_commenced_mask);
+		while (!cpu_isset(cpu, cpu_online_map))
 			mb();
-		if (!test_bit(cpu, &cpu_online_map)) {
+		if (!cpu_isset(cpu, cpu_online_map)) {
 			ret = -ENODEV;
 		} else {
 			smp_synchronize_one_tick(cpu);
diff -puN arch/um/kernel/irq.c~cpumask_t-1 arch/um/kernel/irq.c
--- 25/arch/um/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/um/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -565,9 +565,9 @@ static struct proc_dir_entry * smp_affin
 /* These are read and written as longs, so a read won't see a partial write
  * even during a race.
  */
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 
-#define HEX_DIGITS 8
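+/* two hex characters are needed for each byte of cpumask_t */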
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
@@ -578,10 +578,10 @@ static int irq_affinity_read_proc (char 
 }
 
 static unsigned int parse_hex_value (const char *buffer,
-		unsigned long count, unsigned long *ret)
+		unsigned long count, cpumask_t *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	cpumask_t value = CPU_MASK_NONE;
 	int i;
 
 	if (!count)
@@ -595,10 +595,9 @@ static unsigned int parse_hex_value (con
-	 * Parse the first 8 characters as a hex string, any non-hex char
+	 * Parse the first HEX_DIGITS characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
 
 	for (i = 0; i < count; i++) {
-		unsigned int c = hexnum[i];
+		unsigned int k, c = hexnum[i];
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -607,7 +606,10 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
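+		/* make room for this digit, then merge its four bits into the mask */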
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (c & (1 << k))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -618,7 +620,7 @@ static int irq_affinity_write_proc (stru
 					unsigned long count, void *data)
 {
 	int irq = (long) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -631,7 +633,8 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 #endif
 
@@ -644,17 +647,27 @@ static int irq_affinity_write_proc (stru
 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
+	cpumask_t tmp, *mask = (cpumask_t *) data;
+	int k, len = 0;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", *mask);
+	tmp = *mask;
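+	/* print the mask 16 bits at a time, least-significant chunk first */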
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data, new_value;
+	unsigned long full_count = count, err;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
@@ -693,7 +706,7 @@ static void register_irq_proc (unsigned 
 }
 
-/* Read and written as a long */
-unsigned long prof_cpu_mask = -1;
+/* Read and written as a cpumask_t */
+cpumask_t prof_cpu_mask = CPU_MASK_ALL;
 
 void __init init_irq_proc (void)
 {
diff -puN arch/um/kernel/skas/process_kern.c~cpumask_t-1 arch/um/kernel/skas/process_kern.c
--- 25/arch/um/kernel/skas/process_kern.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/um/kernel/skas/process_kern.c	2003-07-03 01:19:48.000000000 -0700
@@ -152,7 +152,7 @@ static int start_kernel_proc(void *unuse
 	cpu_tasks[0].pid = pid;
 	cpu_tasks[0].task = current;
 #ifdef CONFIG_SMP
- 	cpu_online_map = 1;
+ 	cpu_online_map = cpumask_of_cpu(0);
 #endif
 	start_kernel();
 	return(0);
diff -puN arch/um/kernel/smp.c~cpumask_t-1 arch/um/kernel/smp.c
--- 25/arch/um/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/um/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -5,9 +5,6 @@
 
 #include "linux/config.h"
 
-/* CPU online map, set by smp_boot_cpus */
-unsigned long cpu_online_map = 1;
-
 #ifdef CONFIG_SMP
 
 #include "linux/sched.h"
@@ -24,6 +21,9 @@ unsigned long cpu_online_map = 1;
 #include "irq_user.h"
 #include "os.h"
 
+/* CPU online map, set by smp_boot_cpus */
+cpumask_t cpu_online_map = cpumask_of_cpu(0);
+
 /* Per CPU bogomips and other parameters
  * The only piece used here is the ipi pipe, which is set before SMP is
  * started and never changed.
@@ -104,8 +104,8 @@ void smp_send_stop(void)
 	printk("done\n");
 }
 
-static unsigned long smp_commenced_mask;
-static volatile unsigned long smp_callin_map = 0;
+static cpumask_t smp_commenced_mask;
+static cpumask_t smp_callin_map = CPU_MASK_NONE;
 
 static int idle_proc(void *cpup)
 {
@@ -120,15 +120,15 @@ static int idle_proc(void *cpup)
 		     current->thread.mode.tt.extern_pid);
  
 	wmb();
-	if (test_and_set_bit(cpu, &smp_callin_map)) {
+	if (cpu_test_and_set(cpu, smp_callin_map)) {
 		printk("huh, CPU#%d already present??\n", cpu);
 		BUG();
 	}
 
-	while (!test_bit(cpu, &smp_commenced_mask))
+	while (!cpu_isset(cpu, smp_commenced_mask))
 		cpu_relax();
 
-	set_bit(cpu, &cpu_online_map);
+	cpu_set(cpu, cpu_online_map);
 	default_idle();
 	return(0);
 }
@@ -159,8 +159,8 @@ void smp_prepare_cpus(unsigned int maxcp
 	unsigned long waittime;
 	int err, cpu;
 
-	set_bit(0, &cpu_online_map);
-	set_bit(0, &smp_callin_map);
+	cpu_set(0, cpu_online_map);
+	cpu_set(0, smp_callin_map);
 
 	err = os_pipe(cpu_data[0].ipi_pipe, 1, 1);
 	if(err)	panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
@@ -177,10 +177,10 @@ void smp_prepare_cpus(unsigned int maxcp
 		unhash_process(idle);
 
 		waittime = 200000000;
-		while (waittime-- && !test_bit(cpu, &smp_callin_map))
+		while (waittime-- && !cpu_isset(cpu, smp_callin_map))
 			cpu_relax();
 
-		if (test_bit(cpu, &smp_callin_map))
+		if (cpu_isset(cpu, smp_callin_map))
 			printk("done\n");
 		else printk("failed\n");
 	}
@@ -188,13 +188,13 @@ void smp_prepare_cpus(unsigned int maxcp
 
 void smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
 }
 
 int __cpu_up(unsigned int cpu)
 {
-	set_bit(cpu, &smp_commenced_mask);
-	while (!test_bit(cpu, &cpu_online_map))
+	cpu_set(cpu, smp_commenced_mask);
+	while (!cpu_isset(cpu, cpu_online_map))
 		mb();
 	return(0);
 }
@@ -271,7 +271,7 @@ int smp_call_function(void (*_func)(void
 
 	for (i=0;i<NR_CPUS;i++)
 		if((i != current->thread_info->cpu) && 
-		   test_bit(i, &cpu_online_map))
+		   cpu_isset(i, cpu_online_map))
 			write(cpu_data[i].ipi_pipe[1], "C", 1);
 
 	while (atomic_read(&scf_started) != cpus)
diff -puN arch/um/kernel/tt/process_kern.c~cpumask_t-1 arch/um/kernel/tt/process_kern.c
--- 25/arch/um/kernel/tt/process_kern.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/um/kernel/tt/process_kern.c	2003-07-03 01:19:48.000000000 -0700
@@ -419,7 +419,7 @@ static int start_kernel_proc(void *unuse
 	cpu_tasks[0].pid = pid;
 	cpu_tasks[0].task = current;
 #ifdef CONFIG_SMP
- 	cpu_online_map = 1;
+ 	cpu_online_map = cpumask_of_cpu(0);
 #endif
 	if(debug) os_stop_process(pid);
 	start_kernel();
diff -puN arch/um/kernel/um_arch.c~cpumask_t-1 arch/um/kernel/um_arch.c
--- 25/arch/um/kernel/um_arch.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/um/kernel/um_arch.c	2003-07-03 01:19:48.000000000 -0700
@@ -57,7 +57,7 @@ static int show_cpuinfo(struct seq_file 
 
 	index = (struct cpuinfo_um *)v - cpu_data;
 #ifdef CONFIG_SMP
-	if (!(cpu_online_map & (1 << index)))
+	if (!cpu_online(index))
 		return 0;
 #endif
 
diff -puN arch/x86_64/kernel/apic.c~cpumask_t-1 arch/x86_64/kernel/apic.c
--- 25/arch/x86_64/kernel/apic.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/apic.c	2003-07-03 01:19:48.000000000 -0700
@@ -298,8 +298,8 @@ void __init setup_local_APIC (void)
 	 * Double-check whether this APIC is really registered.
 	 * This is meaningless in clustered apic mode, so we skip it.
 	 */
-	if (!clustered_apic_mode && 
-	    !test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map))
+	if (!clustered_apic_mode &&
+		!cpu_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map))
 		BUG();
 
 	/*
@@ -997,7 +997,7 @@ int __init APIC_init_uniprocessor (void)
 
 	connect_bsp_APIC();
 
-	phys_cpu_present_map = 1;
+	phys_cpu_present_map = cpumask_of_cpu(0);
 	apic_write_around(APIC_ID, boot_cpu_id);
 
 	setup_local_APIC();
diff -puN arch/x86_64/kernel/io_apic.c~cpumask_t-1 arch/x86_64/kernel/io_apic.c
--- 25/arch/x86_64/kernel/io_apic.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/io_apic.c	2003-07-03 01:19:48.000000000 -0700
@@ -1014,7 +1014,7 @@ void disable_IO_APIC(void)
 static void __init setup_ioapic_ids_from_mpc (void)
 {
 	union IO_APIC_reg_00 reg_00;
-	unsigned long phys_id_present_map = phys_cpu_present_map;
+	cpumask_t phys_id_present_map = phys_cpu_present_map;
 	int apic;
 	int i;
 	unsigned char old_id;
@@ -1047,22 +1047,22 @@ static void __init setup_ioapic_ids_from
 		 * system must have a unique ID or we get lots of nice
 		 * 'stuck on smp_invalidate_needed IPI wait' messages.
 	 	 */
-		if (phys_id_present_map & (1 << mp_ioapics[apic].mpc_apicid)) {
+		if (cpu_isset(mp_ioapics[apic].mpc_apicid, phys_id_present_map)) {
 			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
 				apic, mp_ioapics[apic].mpc_apicid);
 			for (i = 0; i < 0xf; i++)
-				if (!(phys_id_present_map & (1 << i)))
+				if (!cpu_isset(i, phys_id_present_map))
 					break;
 			if (i >= 0xf)
 				panic("Max APIC ID exceeded!\n");
 			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
 				i);
-			phys_id_present_map |= 1 << i;
+			cpu_set(i, phys_id_present_map);
 			mp_ioapics[apic].mpc_apicid = i;
 		} else {
 			printk(KERN_INFO 
 			       "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
-			phys_id_present_map |= 1 << mp_ioapics[apic].mpc_apicid;
+			cpu_set(mp_ioapics[apic].mpc_apicid, phys_id_present_map);
 		}
 
 
diff -puN arch/x86_64/kernel/irq.c~cpumask_t-1 arch/x86_64/kernel/irq.c
--- 25/arch/x86_64/kernel/irq.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/irq.c	2003-07-03 01:19:48.000000000 -0700
@@ -792,13 +792,13 @@ int setup_irq(unsigned int irq, struct i
 static struct proc_dir_entry * root_irq_dir;
 static struct proc_dir_entry * irq_dir [NR_IRQS];
 
-#define HEX_DIGITS 8
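+/* two hex characters are needed for each byte of cpumask_t */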
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
 static unsigned int parse_hex_value (const char *buffer,
-		unsigned long count, unsigned long *ret)
+		unsigned long count, cpumask_t *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	cpumask_t value = CPU_MASK_NONE;
 	unsigned i;
 
 	if (!count)
@@ -812,10 +812,9 @@ static unsigned int parse_hex_value (con
-	 * Parse the first 8 characters as a hex string, any non-hex char
+	 * Parse the first HEX_DIGITS characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
 
 	for (i = 0; i < count; i++) {
-		unsigned int c = hexnum[i];
+		unsigned int k, c = hexnum[i];
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -824,7 +823,10 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
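+		/* make room for this digit, then merge its four bits into the mask */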
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (c & (1 << k))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -835,20 +837,31 @@ out:
 
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	int k, len = 0;
+	cpumask_t tmp = irq_affinity[(long)data];
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
+
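+	/* emit the mask 16 bits at a time, least-significant chunk first */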
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int irq_affinity_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
 	int irq = (long) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t tmp, new_value;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -860,7 +873,8 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	irq_affinity[irq] = new_value;
@@ -874,17 +888,26 @@ static int irq_affinity_write_proc (stru
 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
+	cpumask_t tmp, *mask = (cpumask_t *) data;
+	int k, len = 0;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", *mask);
+
+	tmp = *mask;
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(page, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		page += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(page, "\n");
+	return len;
 }
 
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	unsigned long full_count = count, err;
+	cpumask_t new_value, *mask = (cpumask_t *)data;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
diff -puN arch/x86_64/kernel/mpparse.c~cpumask_t-1 arch/x86_64/kernel/mpparse.c
--- 25/arch/x86_64/kernel/mpparse.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/mpparse.c	2003-07-03 01:19:48.000000000 -0700
@@ -65,7 +65,7 @@ unsigned int boot_cpu_id = -1U;
 static unsigned int num_processors = 0;
 
 /* Bitmask of physically existing CPUs */
-unsigned long phys_cpu_present_map = 0;
+cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 
 /* ACPI MADT entry parsing functions */
 #ifdef CONFIG_ACPI_BOOT
@@ -124,7 +124,7 @@ static void __init MP_processor_info (st
 	}
 	ver = m->mpc_apicver;
 
-	phys_cpu_present_map |= 1 << m->mpc_apicid;
+	cpu_set(m->mpc_apicid, phys_cpu_present_map);
 	/*
 	 * Validate version
 	 */
diff -puN arch/x86_64/kernel/msr.c~cpumask_t-1 arch/x86_64/kernel/msr.c
--- 25/arch/x86_64/kernel/msr.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/msr.c	2003-07-03 01:19:48.000000000 -0700
@@ -242,7 +242,7 @@ static int msr_open(struct inode *inode,
   int cpu = minor(file->f_dentry->d_inode->i_rdev);
   struct cpuinfo_x86 *c = &(cpu_data)[cpu];
   
-  if ( !(cpu_online_map & (1UL << cpu)) )
+  if (!cpu_online(cpu))
     return -ENXIO;		/* No such CPU */
   if ( !cpu_has(c, X86_FEATURE_MSR) )
     return -EIO;		/* MSR not supported */
diff -puN arch/x86_64/kernel/reboot.c~cpumask_t-1 arch/x86_64/kernel/reboot.c
--- 25/arch/x86_64/kernel/reboot.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/reboot.c	2003-07-03 01:19:48.000000000 -0700
@@ -110,7 +110,7 @@ static void smp_halt(void)
 	}
 
 	/* Wait for all other CPUs to have run smp_stop_cpu */
-	while (cpu_online_map) 
+	while (!cpus_empty(cpu_online_map))
 		rep_nop(); 
 }
 #endif
diff -puN arch/x86_64/kernel/setup.c~cpumask_t-1 arch/x86_64/kernel/setup.c
--- 25/arch/x86_64/kernel/setup.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/setup.c	2003-07-03 01:19:48.000000000 -0700
@@ -643,7 +643,7 @@ static int show_cpuinfo(struct seq_file 
 
 
 #ifdef CONFIG_SMP
-	if (!(cpu_online_map & (1<<(c-cpu_data))))
+	if (!cpu_online(c-cpu_data))
 		return 0;
 #endif
 
diff -puN arch/x86_64/kernel/smpboot.c~cpumask_t-1 arch/x86_64/kernel/smpboot.c
--- 25/arch/x86_64/kernel/smpboot.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/smpboot.c	2003-07-03 01:19:48.000000000 -0700
@@ -54,11 +54,11 @@
 #include <asm/proto.h>
 
 /* Bitmask of currently online CPUs */
-unsigned long cpu_online_map = 1;
+cpumask_t cpu_online_map = cpumask_of_cpu(0);
 
-static volatile unsigned long cpu_callin_map;
-volatile unsigned long cpu_callout_map;
-static unsigned long smp_commenced_mask;
+static cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
@@ -174,7 +174,7 @@ static void __init synchronize_tsc_bp (v
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (test_bit(i, &cpu_callout_map)) {
+		if (cpu_isset(i, cpu_callout_map)) {
 		t0 = tsc_values[i];
 		sum += t0;
 	}
@@ -183,7 +183,7 @@ static void __init synchronize_tsc_bp (v
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (!test_bit(i, &cpu_callout_map))
+		if (!cpu_isset(i, cpu_callout_map))
 			continue;
 
 		delta = tsc_values[i] - avg;
@@ -258,7 +258,7 @@ void __init smp_callin(void)
 	 */
 	phys_id = GET_APIC_ID(apic_read(APIC_ID));
 	cpuid = smp_processor_id();
-	if (test_and_set_bit(cpuid, &cpu_callin_map)) {
+	if (cpu_test_and_set(cpuid, cpu_callin_map)) {
 		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
 					phys_id, cpuid);
 	}
@@ -280,7 +280,7 @@ void __init smp_callin(void)
 		/*
 		 * Has the boot CPU finished it's STARTUP sequence?
 		 */
-		if (test_bit(cpuid, &cpu_callout_map))
+		if (cpu_isset(cpuid, cpu_callout_map))
 			break;
 		rep_nop();
 	}
@@ -320,7 +320,7 @@ void __init smp_callin(void)
 	/*
 	 * Allow the master to continue.
 	 */
-	set_bit(cpuid, &cpu_callin_map);
+	cpu_set(cpuid, cpu_callin_map);
 
 	/*
 	 *      Synchronize the TSC with the BP
@@ -348,7 +348,7 @@ void __init start_secondary(void)
 	barrier();
 
 	Dprintk("cpu %d: waiting for commence\n", smp_processor_id()); 
-	while (!test_bit(smp_processor_id(), &smp_commenced_mask))
+	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
 		rep_nop();
 
 	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id()); 	
@@ -372,7 +372,7 @@ void __init start_secondary(void)
 	local_flush_tlb();
 
 	Dprintk("cpu %d eSetting cpu_online_map\n", smp_processor_id()); 
-	set_bit(smp_processor_id(), &cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
 	wmb();
 	
 	cpu_idle();
@@ -630,19 +630,19 @@ static void __init do_boot_cpu (int apic
 		 * allow APs to start initializing.
 		 */
 		Dprintk("Before Callout %d.\n", cpu);
-		set_bit(cpu, &cpu_callout_map);
+		cpu_set(cpu, cpu_callout_map);
 		Dprintk("After Callout %d.\n", cpu);
 
 		/*
 		 * Wait 5s total for a response
 		 */
 		for (timeout = 0; timeout < 50000; timeout++) {
-			if (test_bit(cpu, &cpu_callin_map))
+			if (cpu_isset(cpu, cpu_callin_map))
 				break;	/* It has booted */
 			udelay(100);
 		}
 
-		if (test_bit(cpu, &cpu_callin_map)) {
+		if (cpu_isset(cpu, cpu_callin_map)) {
 			/* number CPUs logically, starting from 1 (BSP is 0) */
 			Dprintk("OK.\n");
 			printk(KERN_INFO "CPU%d: ", cpu);
@@ -663,7 +663,7 @@ static void __init do_boot_cpu (int apic
 		}
 	}
 	if (boot_error) {
-		clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
+		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
 		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
 		cpucount--;
 	}
@@ -734,10 +734,10 @@ static void __init smp_boot_cpus(unsigne
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
 
-	if (!test_bit(hard_smp_processor_id(), &phys_cpu_present_map)) {
+	if (!cpu_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
 		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
 		       hard_smp_processor_id());
-		phys_cpu_present_map |= (1 << hard_smp_processor_id());
+		cpu_set(hard_smp_processor_id(), phys_cpu_present_map);
 	}
 
 	/*
@@ -747,8 +747,8 @@ static void __init smp_boot_cpus(unsigne
 	if (!smp_found_config) {
 		printk(KERN_NOTICE "SMP motherboard not detected.\n");
 		io_apic_irqs = 0;
-		cpu_online_map = phys_cpu_present_map = 1;
-		phys_cpu_present_map = 1;
+		cpu_online_map = cpumask_of_cpu(0);
+		phys_cpu_present_map = cpumask_of_cpu(0);
 		if (APIC_init_uniprocessor())
 			printk(KERN_NOTICE "Local APIC not detected."
 					   " Using dummy APIC emulation.\n");
@@ -759,10 +759,10 @@ static void __init smp_boot_cpus(unsigne
 	 * Should not be necessary because the MP table should list the boot
 	 * CPU too, but we do it for the sake of robustness anyway.
 	 */
-	if (!test_bit(boot_cpu_id, &phys_cpu_present_map)) {
+	if (!cpu_isset(boot_cpu_id, phys_cpu_present_map)) {
 		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
 								 boot_cpu_id);
-		phys_cpu_present_map |= (1 << hard_smp_processor_id());
+		cpu_set(hard_smp_processor_id(), phys_cpu_present_map);
 	}
 
 	/*
@@ -773,8 +773,8 @@ static void __init smp_boot_cpus(unsigne
 			boot_cpu_id);
 		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
 		io_apic_irqs = 0;
-		cpu_online_map = phys_cpu_present_map = 1;
-		phys_cpu_present_map = 1;
+		cpu_online_map = cpumask_of_cpu(0);
+		phys_cpu_present_map = cpumask_of_cpu(0);
 		disable_apic = 1;
 		goto smp_done;
 	}
@@ -788,8 +788,8 @@ static void __init smp_boot_cpus(unsigne
 		smp_found_config = 0;
 		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
 		io_apic_irqs = 0;
-		cpu_online_map = phys_cpu_present_map = 1;
-		phys_cpu_present_map = 1;
+		cpu_online_map = cpumask_of_cpu(0);
+		phys_cpu_present_map = cpumask_of_cpu(0);
 		disable_apic = 1;
 		goto smp_done;
 	}
@@ -812,7 +812,7 @@ static void __init smp_boot_cpus(unsigne
 		if (apicid == boot_cpu_id)
 			continue;
 
-		if (!(phys_cpu_present_map & (1 << apicid)))
+		if (!cpu_isset(apicid, phys_cpu_present_map))
 			continue;
 		if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
 			continue;
@@ -848,7 +848,7 @@ static void __init smp_boot_cpus(unsigne
 	} else {
 		unsigned long bogosum = 0;
 		for (cpu = 0; cpu < NR_CPUS; cpu++)
-			if (cpu_callout_map & (1<<cpu))
+			if (cpu_isset(cpu, cpu_callout_map))
 				bogosum += cpu_data[cpu].loops_per_jiffy;
 		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 			cpucount+1,
@@ -889,20 +889,20 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_callout_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_callout_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86.  See "rewrite" above. */
-	if (test_bit(cpu, &smp_commenced_mask)) { 
+	if (cpu_isset(cpu, smp_commenced_mask)) {
 		local_irq_enable();
 		return -ENOSYS;
 	}
 
 	/* In case one didn't come up */
-	if (!test_bit(cpu, &cpu_callin_map)) { 
+	if (!cpu_isset(cpu, cpu_callin_map)) {
 		local_irq_enable();
 		return -EIO;
 	}
@@ -911,8 +911,8 @@ int __devinit __cpu_up(unsigned int cpu)
 	/* Unleash the CPU! */
 	Dprintk("waiting for cpu %d\n", cpu);
 
-	set_bit(cpu, &smp_commenced_mask);
-	while (!test_bit(cpu, &cpu_online_map))
+	cpu_set(cpu, smp_commenced_mask);
+	while (!cpu_isset(cpu, cpu_online_map))
 		mb();
 	return 0;
 }
diff -puN arch/x86_64/kernel/smp.c~cpumask_t-1 arch/x86_64/kernel/smp.c
--- 25/arch/x86_64/kernel/smp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/smp.c	2003-07-03 01:19:48.000000000 -0700
@@ -230,9 +230,10 @@ out:
 	put_cpu_no_resched();
 }
 
-static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 						unsigned long va)
 {
+	cpumask_t tmp;
 	/*
 	 * A couple of (to be removed) sanity checks:
 	 *
@@ -240,12 +241,10 @@ static void flush_tlb_others (unsigned l
 	 * - current CPU must not be in mask
 	 * - mask must exist :)
 	 */
-	if (!cpumask)
-		BUG();
-	if ((cpumask & cpu_online_map) != cpumask)
-		BUG();
-	if (cpumask & (1 << smp_processor_id()))
-		BUG();
+	BUG_ON(cpus_empty(cpumask));
+	cpus_and(tmp, cpumask, cpu_online_map);
+	BUG_ON(!cpus_equal(tmp, cpumask));
+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
 	if (!mm)
 		BUG();
 
@@ -430,7 +429,7 @@ void smp_stop_cpu(void)
 	/*
 	 * Remove this CPU:
 	 */
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	disable_local_APIC();
 	local_irq_enable(); 
@@ -491,8 +490,8 @@ int slow_smp_processor_id(void)
 	unsigned long sp = (unsigned long)&stack_location; 
 	int offset = 0, cpu;
 
-	for (offset = 0; (cpu_online_map >> offset); offset = cpu + 1) { 
-		cpu = ffz(~(cpu_online_map >> offset));
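+	/* walk the online map; next_cpu() returns the first set bit at or above offset */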
+	for (offset = 0; next_cpu(offset, cpu_online_map) < NR_CPUS; offset = cpu + 1) {
+		cpu = next_cpu(offset, cpu_online_map);
 
 		if (sp >= (u64)cpu_pda[cpu].irqstackptr - IRQSTACKSIZE && 
 		    sp <= (u64)cpu_pda[cpu].irqstackptr)
diff -puN drivers/base/node.c~cpumask_t-1 drivers/base/node.c
--- 25/drivers/base/node.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/drivers/base/node.c	2003-07-03 01:19:48.000000000 -0700
@@ -7,6 +7,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/node.h>
+#include <linux/cpumask.h>
 #include <linux/topology.h>
 
 static struct sysdev_class node_class = {
@@ -17,7 +18,17 @@ static struct sysdev_class node_class = 
 static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
 {
 	struct node *node_dev = to_node(dev);
-        return sprintf(buf,"%lx\n",node_dev->cpumap);
+	cpumask_t tmp = node_dev->cpumap;
+	int k, len = 0;
+
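+	/* print the mask 16 bits at a time, least-significant chunk first */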
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
+		int j = sprintf(buf, "%04hx", (u16)cpus_coerce(tmp));
+		len += j;
+		buf += j;
+		cpus_shift_right(tmp, tmp, 16);
+	}
+	len += sprintf(buf, "\n");
+	return len;
 }
 static SYSDEV_ATTR(cpumap,S_IRUGO,node_read_cpumap,NULL);
 
diff -puN drivers/s390/char/sclp.c~cpumask_t-1 drivers/s390/char/sclp.c
--- 25/drivers/s390/char/sclp.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/drivers/s390/char/sclp.c	2003-07-03 01:19:48.000000000 -0700
@@ -468,17 +468,17 @@ static struct sclp_register sclp_state_c
  * SCLP quiesce event handler
  */
 #ifdef CONFIG_SMP
-static volatile unsigned long cpu_quiesce_map;
+static cpumask_t cpu_quiesce_map;
 
 static void
 do_load_quiesce_psw(void * __unused)
 {
 	psw_t quiesce_psw;
 
-	clear_bit(smp_processor_id(), &cpu_quiesce_map);
+	cpu_clear(smp_processor_id(), cpu_quiesce_map);
 	if (smp_processor_id() == 0) {
 		/* Wait for all other cpus to enter do_load_quiesce_psw */
-		while (cpu_quiesce_map != 0);
+		while (!cpus_empty(cpu_quiesce_map));
 		/* Quiesce the last cpu with the special psw */
 		quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
 		quiesce_psw.addr = 0xfff;
diff -puN include/asm-alpha/smp.h~cpumask_t-1 include/asm-alpha/smp.h
--- 25/include/asm-alpha/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-alpha/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -3,6 +3,7 @@
 
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <linux/bitops.h>
 #include <asm/pal.h>
 
@@ -44,27 +45,12 @@ extern struct cpuinfo_alpha cpu_data[NR_
 #define hard_smp_processor_id()	__hard_smp_processor_id()
 #define smp_processor_id()	(current_thread_info()->cpu)
 
-extern unsigned long cpu_present_mask;
-extern volatile unsigned long cpu_online_map;
+extern cpumask_t cpu_present_mask;
+extern cpumask_t cpu_online_map;
 extern int smp_num_cpus;
 
-#define cpu_possible(cpu)	(cpu_present_mask & (1UL << (cpu)))
-#define cpu_online(cpu)		(cpu_online_map & (1UL << (cpu)))
-
-static inline int
-num_online_cpus(void)
-{
-	return hweight64(cpu_online_map);
-}
-
-extern inline int
-any_online_cpu(unsigned int mask)
-{
-        if (mask & cpu_online_map)
-                return __ffs(mask & cpu_online_map);
-
-        return -1;
-}
+#define cpu_possible(cpu)	cpu_isset(cpu, cpu_present_mask)
+#define cpu_online(cpu)		cpu_isset(cpu, cpu_online_map)
 
 extern int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, unsigned long cpu);
 
diff -puN /dev/null include/asm-generic/cpumask_arith.h
--- /dev/null	2002-08-30 16:31:37.000000000 -0700
+++ 25-akpm/include/asm-generic/cpumask_arith.h	2003-07-03 01:19:48.000000000 -0700
@@ -0,0 +1,61 @@
+#ifndef __ASM_GENERIC_CPUMASK_ARITH_H
+#define __ASM_GENERIC_CPUMASK_ARITH_H
+
+#define cpu_set(cpu, map)				\
+	do {						\
+		map |= ((cpumask_t)1) << (cpu);		\
+	} while (0)
+#define cpu_clear(cpu, map)				\
+	do {						\
+		map &= ~(((cpumask_t)1) << (cpu));	\
+	} while (0)
+#define cpu_isset(cpu, map)				\
+	((map) & (((cpumask_t)1) << (cpu)))
+#define cpu_test_and_set(cpu, map)			\
+	test_and_set_bit(cpu, (unsigned long *)(&(map)))
+
+#define cpus_and(dst,src1,src2)		do { dst = (src1) & (src2); } while (0)
+#define cpus_or(dst,src1,src2)		do { dst = (src1) | (src2); } while (0)
+#define cpus_clear(map)			do { map = 0; } while (0)
+#define cpus_complement(map)		do { map = ~(map); } while (0)
+#define cpus_equal(map1, map2)		((map1) == (map2))
+#define cpus_empty(map)			((map) == 0)
+
+#if BITS_PER_LONG == 32
+#if NR_CPUS <= 32
+#define cpus_weight(map)		hweight32(map)
+#else
+#define cpus_weight(map)				\
+({							\
+	u32 *__map = (u32 *)(&(map));			\
+	hweight32(__map[0]) + hweight32(__map[1]);	\
+})
+#endif
+#elif BITS_PER_LONG == 64
+#define cpus_weight(map)		hweight64(map)
+#endif
+
+#define cpus_shift_right(dst, src, n)	do { dst = (src) >> (n); } while (0)
+#define cpus_shift_left(dst, src, n)	do { dst = (src) << (n); } while (0)
+
+#define any_online_cpu(map)		(cpus_empty(map) ? NR_CPUS : first_cpu(map))
+
+
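+/* all NR_CPUS low bits set: the shift clears the type's unused high bits */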
+#define CPU_MASK_ALL	(~((cpumask_t)0) >> (8*sizeof(cpumask_t) - NR_CPUS))
+#define CPU_MASK_NONE	((cpumask_t)0)
+
+/* only ever use this for things that are _never_ used on large boxen */
+#define cpus_coerce(map)		((unsigned long)(map))
+#define cpus_promote(map)		({ map; })
+#define cpumask_of_cpu(cpu)		({ ((cpumask_t)1) << (cpu); })
+
+#ifdef CONFIG_SMP
+#define first_cpu(map)			__ffs(map)
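+/* first set bit at or above "cpu": mask off the bits below it, then __ffs */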
+#define next_cpu(cpu, map)				\
+	__ffs((map) & ~(((cpumask_t)1 << (cpu)) - 1))
+#else
+#define first_cpu(map)			0
+#define next_cpu(cpu, map)		1
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_GENERIC_CPUMASK_ARITH_H */
diff -puN /dev/null include/asm-generic/cpumask_array.h
--- /dev/null	2002-08-30 16:31:37.000000000 -0700
+++ 25-akpm/include/asm-generic/cpumask_array.h	2003-07-03 01:19:48.000000000 -0700
@@ -0,0 +1,41 @@
+#ifndef __ASM_GENERIC_CPUMASK_ARRAY_H
+#define __ASM_GENERIC_CPUMASK_ARRAY_H
+
+#define CPU_ARRAY_SIZE		BITS_TO_LONGS(NR_CPUS)
+
+#define cpu_set(cpu, map)		set_bit(cpu, (map).mask)
+#define cpu_clear(cpu, map)		clear_bit(cpu, (map).mask)
+#define cpu_isset(cpu, map)		test_bit(cpu, (map).mask)
+#define cpu_test_and_set(cpu, map)	test_and_set_bit(cpu, (map).mask)
+
+#define cpus_and(dst,src1,src2)	bitmap_and((dst).mask,(src1).mask, (src2).mask, NR_CPUS)
+#define cpus_or(dst,src1,src2)	bitmap_or((dst).mask, (src1).mask, (src2).mask, NR_CPUS)
+#define cpus_clear(map)		bitmap_clear((map).mask, NR_CPUS)
+#define cpus_complement(map)	bitmap_complement((map).mask, NR_CPUS)
+#define cpus_equal(map1, map2)	bitmap_equal((map1).mask, (map2).mask, NR_CPUS)
+#define cpus_empty(map)		bitmap_empty((map).mask, NR_CPUS)
+#define cpus_weight(map)		bitmap_weight((map).mask, NR_CPUS)
+#define cpus_shift_right(d, s, n)	bitmap_shift_right((d).mask, (s).mask, n, NR_CPUS)
+#define cpus_shift_left(d, s, n)	bitmap_shift_left((d).mask, (s).mask, n, NR_CPUS)
+#define first_cpu(map)		find_first_bit((map).mask, NR_CPUS)
+#define next_cpu(cpu, map)	find_next_bit((map).mask, NR_CPUS, cpu)
+
+/* only ever use this for things that are _never_ used on large boxen */
+#define cpus_coerce(map)	((map).mask[0])
+#define cpus_promote(map)	({ cpumask_t __cpu_mask = CPU_MASK_NONE;\
+					__cpu_mask.mask[0] = map;	\
+					__cpu_mask;			\
+				})
+#define cpumask_of_cpu(cpu)	({ cpumask_t __cpu_mask = CPU_MASK_NONE;\
+					cpu_set(cpu, __cpu_mask);	\
+					__cpu_mask;			\
+				})
+#define any_online_cpu(map)	find_first_bit((map).mask, NR_CPUS)
+
+/*
+ * um, these need to be usable as static initializers
+ */
+#define CPU_MASK_ALL	{ {[0 ... CPU_ARRAY_SIZE-1] = ~0UL} }
+#define CPU_MASK_NONE	{ {[0 ... CPU_ARRAY_SIZE-1] =  0UL} }
+
+#endif /* __ASM_GENERIC_CPUMASK_ARRAY_H */
diff -puN /dev/null include/asm-generic/cpumask_const_reference.h
--- /dev/null	2002-08-30 16:31:37.000000000 -0700
+++ 25-akpm/include/asm-generic/cpumask_const_reference.h	2003-07-03 01:19:48.000000000 -0700
@@ -0,0 +1,29 @@
+#ifndef __ASM_GENERIC_CPUMASK_CONST_REFERENCE_H
+#define __ASM_GENERIC_CPUMASK_CONST_REFERENCE_H
+
+struct cpumask_ref {
+	const cpumask_t *val;
+};
+
+typedef const struct cpumask_ref cpumask_const_t;
+
+#define mk_cpumask_const(map)		((cpumask_const_t){ &(map) })
+#define cpu_isset_const(cpu, map)	cpu_isset(cpu, *(map).val)
+
+#define cpus_and_const(dst,src1,src2)	cpus_and(dst,*(src1).val,*(src2).val)
+#define cpus_or_const(dst,src1,src2)	cpus_or(dst,*(src1).val,*(src2).val)
+
+#define cpus_equal_const(map1, map2)	cpus_equal(*(map1).val, *(map2).val)
+
+#define cpus_copy_const(map1, map2)	bitmap_copy((map1).mask, (map2).val->mask, NR_CPUS)
+
+#define cpus_empty_const(map)		cpus_empty(*(map).val)
+#define cpus_weight_const(map)		cpus_weight(*(map).val)
+#define first_cpu_const(map)		first_cpu(*(map).val)
+#define next_cpu_const(cpu, map)	next_cpu(cpu, *(map).val)
+
+/* only ever use this for things that are _never_ used on large boxen */
+#define cpus_coerce_const(map)		cpus_coerce(*(map).val)
+#define any_online_cpu_const(map)	any_online_cpu(*(map).val)
+
+#endif /* __ASM_GENERIC_CPUMASK_CONST_REFERENCE_H */
diff -puN /dev/null include/asm-generic/cpumask_const_value.h
--- /dev/null	2002-08-30 16:31:37.000000000 -0700
+++ 25-akpm/include/asm-generic/cpumask_const_value.h	2003-07-03 01:19:48.000000000 -0700
@@ -0,0 +1,21 @@
+#ifndef __ASM_GENERIC_CPUMASK_CONST_VALUE_H
+#define __ASM_GENERIC_CPUMASK_CONST_VALUE_H
+
+typedef const cpumask_t cpumask_const_t;
+
+#define mk_cpumask_const(map)		((cpumask_const_t)(map))
+#define cpu_isset_const(cpu, map)	cpu_isset(cpu, map)
+#define cpus_and_const(dst,src1,src2)	cpus_and(dst, src1, src2)
+#define cpus_or_const(dst,src1,src2)	cpus_or(dst, src1, src2)
+#define cpus_equal_const(map1, map2)	cpus_equal(map1, map2)
+#define cpus_empty_const(map)		cpus_empty(map)
+#define cpus_copy_const(map1, map2)	do { map1 = (cpumask_t)map2; } while (0)
+#define cpus_weight_const(map)		cpus_weight(map)
+#define first_cpu_const(map)		first_cpu(map)
+#define next_cpu_const(cpu, map)	next_cpu(cpu, map)
+
+/* only ever use this for things that are _never_ used on large boxen */
+#define cpus_coerce_const(map)		cpus_coerce(map)
+#define any_online_cpu_const(map)	any_online_cpu(map)
+
+#endif /* __ASM_GENERIC_CPUMASK_CONST_VALUE_H */
diff -puN /dev/null include/asm-generic/cpumask_up.h
--- /dev/null	2002-08-30 16:31:37.000000000 -0700
+++ 25-akpm/include/asm-generic/cpumask_up.h	2003-07-03 01:19:48.000000000 -0700
@@ -0,0 +1,60 @@
+#ifndef __ASM_GENERIC_CPUMASK_UP_H
+#define __ASM_GENERIC_CPUMASK_UP_H
+
+#define CPU_ARRAY_SIZE		BITS_TO_LONGS(NR_CPUS)
+
+#define cpus_coerce(map)	((map).mask[0])
+
+#define cpu_set(cpu, map)		do { cpus_coerce(map) = 1UL; } while (0)
+#define cpu_clear(cpu, map)		do { cpus_coerce(map) = 0UL; } while (0)
+#define cpu_isset(cpu, map)		(cpus_coerce(map) != 0UL)
+#define cpu_test_and_set(cpu, map)	test_and_set_bit(0, (map).mask)
+
+#define cpus_and(dst, src1, src2)					\
+	do {								\
+		if (cpus_coerce(src1) && cpus_coerce(src2))		\
+			cpus_coerce(dst) = 1UL;				\
+		else							\
+			cpus_coerce(dst) = 0UL;				\
+	} while (0)
+
+#define cpus_or(dst, src1, src2)					\
+	do {								\
+		if (cpus_coerce(src1) || cpus_coerce(src2))		\
+			cpus_coerce(dst) = 1UL;				\
+		else							\
+			cpus_coerce(dst) = 0UL;				\
+	} while (0)
+
+#define cpus_clear(map)			do { cpus_coerce(map) = 0UL; } while (0)
+
+#define cpus_complement(map)						\
+	do {								\
+		cpus_coerce(map) = !cpus_coerce(map);			\
+	} while (0)
+
+#define cpus_equal(map1, map2)		(cpus_coerce(map1) == cpus_coerce(map2))
+#define cpus_empty(map)			(cpus_coerce(map) == 0UL)
+#define cpus_weight(map)		(cpus_coerce(map) ? 1UL : 0UL)
+#define cpus_shift_right(d, s, n)	do { cpus_coerce(d) = 0UL; } while (0)
+#define cpus_shift_left(d, s, n)	do { cpus_coerce(d) = 0UL; } while (0)
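+/* on UP there is only cpu 0; 1 (== NR_CPUS) means "no such cpu" */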
+#define first_cpu(map)			(cpus_coerce(map) ? 0 : 1)
+#define next_cpu(cpu, map)		1
+
+/* only ever use this for things that are _never_ used on large boxen */
+#define cpus_promote(map)						\
+	({								\
+		cpumask_t __tmp__;					\
+		cpus_coerce(__tmp__) = map;				\
+		__tmp__;						\
+	})
+#define cpumask_of_cpu(cpu)		cpus_promote(1)
+#define any_online_cpu(map)		(cpus_coerce(map) ? 0 : 1)
+
+/*
+ * um, these need to be usable as static initializers
+ */
+#define CPU_MASK_ALL	{ {[0 ... CPU_ARRAY_SIZE-1] =  1UL} }
+#define CPU_MASK_NONE	{ {[0 ... CPU_ARRAY_SIZE-1] =  0UL} }
+
+#endif /* __ASM_GENERIC_CPUMASK_UP_H */
diff -puN include/asm-i386/atomic.h~cpumask_t-1 include/asm-i386/atomic.h
--- 25/include/asm-i386/atomic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/atomic.h	2003-07-03 01:19:48.000000000 -0700
@@ -193,7 +193,7 @@ __asm__ __volatile__(LOCK "andl %0,%1" \
 
 #define atomic_set_mask(mask, addr) \
 __asm__ __volatile__(LOCK "orl %0,%1" \
-: : "r" (mask),"m" (*addr) : "memory")
+: : "r" (mask),"m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
 #define smp_mb__before_atomic_dec()	barrier()
diff -puN include/asm-i386/bitops.h~cpumask_t-1 include/asm-i386/bitops.h
--- 25/include/asm-i386/bitops.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/bitops.h	2003-07-03 01:19:48.000000000 -0700
@@ -270,7 +270,7 @@ static __inline__ int variable_test_bit(
  * Returns the bit-number of the first zero bit, not the number of the byte
  * containing a bit.
  */
-static __inline__ int find_first_zero_bit(unsigned long * addr, unsigned size)
+static __inline__ int find_first_zero_bit(const unsigned long *addr, unsigned size)
 {
 	int d0, d1, d2;
 	int res;
@@ -302,7 +302,7 @@ static __inline__ int find_first_zero_bi
  * Returns the bit-number of the first set bit, not the number of the byte
  * containing a bit.
  */
-static __inline__ int find_first_bit(unsigned long * addr, unsigned size)
+static __inline__ int find_first_bit(const unsigned long *addr, unsigned size)
 {
 	int d0, d1;
 	int res;
@@ -328,7 +328,7 @@ static __inline__ int find_first_bit(uns
  * @offset: The bitnumber to start searching at
  * @size: The maximum size to search
  */
-static __inline__ int find_next_zero_bit(unsigned long * addr, int size, int offset)
+static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
 {
 	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
 	int set = 0, bit = offset & 31, res;
@@ -361,9 +361,9 @@ static __inline__ int find_next_zero_bit
  * @offset: The bitnumber to start searching at
  * @size: The maximum size to search
  */
-static __inline__ int find_next_bit(unsigned long *addr, int size, int offset)
+static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset)
 {
-	unsigned long * p = addr + (offset >> 5);
+	const unsigned long *p = addr + (offset >> 5);
 	int set = 0, bit = offset & 31, res;
 
 	if (bit) {
@@ -430,7 +430,7 @@ static __inline__ unsigned long __ffs(un
  * unlikely to be set. It's guaranteed that at least one of the 140
  * bits is cleared.
  */
-static inline int sched_find_first_bit(unsigned long *b)
+static inline int sched_find_first_bit(const unsigned long *b)
 {
 	if (unlikely(b[0]))
 		return __ffs(b[0]);
diff -puN include/asm-i386/genapic.h~cpumask_t-1 include/asm-i386/genapic.h
--- 25/include/asm-i386/genapic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/genapic.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,13 +1,13 @@
 #ifndef _ASM_GENAPIC_H
 #define _ASM_GENAPIC_H 1
 
-/* 
+/*
  * Generic APIC driver interface.
- *  
- * An straight forward mapping of the APIC related parts of the 
+ *
+ * An straight forward mapping of the APIC related parts of the
  * x86 subarchitecture interface to a dynamic object.
- *	
- * This is used by the "generic" x86 subarchitecture. 
+ *
+ * This is used by the "generic" x86 subarchitecture.
  *
  * Copyright 2003 Andi Kleen, SuSE Labs.
  */
@@ -22,23 +22,23 @@ struct genapic { 
 	int (*probe)(void); 
 
 	int (*apic_id_registered)(void);
-	unsigned long (*target_cpus)(void); 
+	cpumask_t (*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode; 
 	int apic_broadcast_id; 
 	int esr_disable;
-	unsigned long (*check_apicid_used)(unsigned long bitmap, int apicid); 
+	unsigned long (*check_apicid_used)(cpumask_const_t bitmap, int apicid);
 	unsigned long (*check_apicid_present)(int apicid); 
 	int no_balance_irq;
 	void (*init_apic_ldr)(void);
-	unsigned long (*ioapic_phys_id_map)(unsigned long map); 
+	cpumask_t (*ioapic_phys_id_map)(cpumask_const_t map);
 
 	void (*clustered_apic_check)(void);
 	int (*multi_timer_check)(int apic, int irq);
 	int (*apicid_to_node)(int logical_apicid); 
 	int (*cpu_to_logical_apicid)(int cpu);
 	int (*cpu_present_to_apicid)(int mps_cpu);
-	unsigned long (*apicid_to_cpu_present)(int phys_apicid); 
+	cpumask_t (*apicid_to_cpu_present)(int phys_apicid);
 	int (*mpc_apic_id)(struct mpc_config_processor *m, 
 			   struct mpc_config_translation *t); 
 	void (*setup_portio_remap)(void); 
@@ -59,11 +59,11 @@ struct genapic { 
 	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
 
 	unsigned (*get_apic_id)(unsigned long x);
-	unsigned long apic_id_mask; 
-	unsigned int (*cpu_mask_to_apicid)(unsigned long cpumask);
+	unsigned long apic_id_mask;
+	unsigned int (*cpu_mask_to_apicid)(cpumask_const_t cpumask);
 	
 	/* ipi */
-	void (*send_IPI_mask)(int mask, int vector);
+	void (*send_IPI_mask)(cpumask_t mask, int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 }; 
diff -puN include/asm-i386/highmem.h~cpumask_t-1 include/asm-i386/highmem.h
--- 25/include/asm-i386/highmem.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/highmem.h	2003-07-03 01:19:48.000000000 -0700
@@ -22,6 +22,7 @@
 
 #include <linux/config.h>
 #include <linux/interrupt.h>
+#include <linux/threads.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 
@@ -39,7 +40,11 @@ extern void kmap_init(void);
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
+#if NR_CPUS <= 32
 #define PKMAP_BASE (0xff800000UL)
+#else
+#define PKMAP_BASE (0xff600000UL)
+#endif
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
diff -puN include/asm-i386/hw_irq.h~cpumask_t-1 include/asm-i386/hw_irq.h
--- 25/include/asm-i386/hw_irq.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/hw_irq.h	2003-07-03 01:19:48.000000000 -0700
@@ -30,33 +30,33 @@ extern int irq_vector[NR_IRQS];
 extern void (*interrupt[NR_IRQS])(void);
 
 #ifdef CONFIG_SMP
-extern asmlinkage void reschedule_interrupt(void);
-extern asmlinkage void invalidate_interrupt(void);
-extern asmlinkage void call_function_interrupt(void);
+asmlinkage void reschedule_interrupt(void);
+asmlinkage void invalidate_interrupt(void);
+asmlinkage void call_function_interrupt(void);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
-extern asmlinkage void apic_timer_interrupt(void);
-extern asmlinkage void error_interrupt(void);
-extern asmlinkage void spurious_interrupt(void);
-extern asmlinkage void thermal_interrupt(struct pt_regs);
+asmlinkage void apic_timer_interrupt(void);
+asmlinkage void error_interrupt(void);
+asmlinkage void spurious_interrupt(void);
+asmlinkage void thermal_interrupt(struct pt_regs);
 #endif
 
-extern void mask_irq(unsigned int irq);
-extern void unmask_irq(unsigned int irq);
-extern void disable_8259A_irq(unsigned int irq);
-extern void enable_8259A_irq(unsigned int irq);
-extern int i8259A_irq_pending(unsigned int irq);
-extern void make_8259A_irq(unsigned int irq);
-extern void init_8259A(int aeoi);
-extern void FASTCALL(send_IPI_self(int vector));
-extern void init_VISWS_APIC_irqs(void);
-extern void setup_IO_APIC(void);
-extern void disable_IO_APIC(void);
-extern void print_IO_APIC(void);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-extern void send_IPI(int dest, int vector);
-extern void setup_ioapic_dest(unsigned long mask);
+void mask_irq(unsigned int irq);
+void unmask_irq(unsigned int irq);
+void disable_8259A_irq(unsigned int irq);
+void enable_8259A_irq(unsigned int irq);
+int i8259A_irq_pending(unsigned int irq);
+void make_8259A_irq(unsigned int irq);
+void init_8259A(int aeoi);
+void FASTCALL(send_IPI_self(int vector));
+void init_VISWS_APIC_irqs(void);
+void setup_IO_APIC(void);
+void disable_IO_APIC(void);
+void print_IO_APIC(void);
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
+void send_IPI(int dest, int vector);
+void setup_ioapic_dest(cpumask_t mask);
 
 extern unsigned long io_apic_irqs;
 
diff -puN include/asm-i386/mach-bigsmp/mach_apic.h~cpumask_t-1 include/asm-i386/mach-bigsmp/mach_apic.h
--- 25/include/asm-i386/mach-bigsmp/mach_apic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-bigsmp/mach_apic.h	2003-07-03 01:19:48.000000000 -0700
@@ -20,7 +20,7 @@ static inline int apic_id_registered(voi
 }
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
-static inline unsigned long target_cpus(void)
+static inline cpumask_t target_cpus(void)
 { 
 	return cpu_online_map;
 }
@@ -30,13 +30,14 @@ static inline unsigned long target_cpus(
 #define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
 
 #define APIC_BROADCAST_ID     (0x0f)
-static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) 
-{ 
+static inline unsigned long check_apicid_used(cpumask_const_t bitmap, int apicid)
+{
 	return 0;
-} 
+}
+
 static inline unsigned long check_apicid_present(int bit) 
 {
-	return (phys_cpu_present_map & (1 << bit));
+	return cpu_isset(bit, phys_cpu_present_map);
 }
 
 #define apicid_cluster(apicid) (apicid & 0xF0)
@@ -88,9 +89,9 @@ static inline int cpu_present_to_apicid(
 	return (int) bios_cpu_apicid[mps_cpu];
 }
 
-static inline unsigned long apicid_to_cpu_present(int phys_apicid)
+static inline cpumask_t apicid_to_cpu_present(int phys_apicid)
 {
-	return (1ul << phys_apicid);
+	return cpumask_of_cpu(phys_apicid);
 }
 
 extern volatile u8 cpu_2_logical_apicid[];
@@ -108,13 +109,13 @@ static inline int mpc_apic_id(struct mpc
 	        (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
 	        (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
 	        m->mpc_apicver);
-	return (m->mpc_apicid);
+	return m->mpc_apicid;
 }
 
-static inline ulong ioapic_phys_id_map(ulong phys_map)
+static inline cpumask_t ioapic_phys_id_map(cpumask_const_t phys_map)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
-	return (0x0F);
+	return cpus_promote(0xFUL);
 }
 
 #define WAKE_SECONDARY_VIA_INIT
@@ -132,25 +133,25 @@ static inline int check_phys_apicid_pres
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;	
 
-	num_bits_set = hweight32(cpumask); 
+	num_bits_set = cpus_weight_const(cpumask);
 	/* Return id to all */
-	if (num_bits_set == 32)
+	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
 	/* 
 	 * The cpus in the mask must all be on the apic cluster.  If are not 
 	 * on the same apicid cluster return default value of TARGET_CPUS. 
 	 */
-	cpu = ffs(cpumask)-1;
+	cpu = first_cpu_const(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpumask & (1 << cpu)) {
+		if (cpu_isset_const(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) != 
 					apicid_cluster(new_apicid)){
diff -puN include/asm-i386/mach-bigsmp/mach_ipi.h~cpumask_t-1 include/asm-i386/mach-bigsmp/mach_ipi.h
--- 25/include/asm-i386/mach-bigsmp/mach_ipi.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-bigsmp/mach_ipi.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,18 +1,19 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-inline void send_IPI_mask_sequence(int mask, int vector);
+inline void send_IPI_mask_sequence(cpumask_t mask, int vector);
 
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	unsigned long mask = cpu_online_map & ~(1 << smp_processor_id());
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
 
-	if (mask)
+	if (!cpus_empty(mask))
 		send_IPI_mask(mask, vector);
 }
 
diff -puN include/asm-i386/mach-default/mach_apic.h~cpumask_t-1 include/asm-i386/mach-default/mach_apic.h
--- 25/include/asm-i386/mach-default/mach_apic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-default/mach_apic.h	2003-07-03 01:19:48.000000000 -0700
@@ -5,12 +5,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
-static inline unsigned long target_cpus(void)
+static inline cpumask_t target_cpus(void)
 { 
 #ifdef CONFIG_SMP
 	return cpu_online_map;
 #else
-	return 1; 
+	return cpumask_of_cpu(0);
 #endif
 } 
 #define TARGET_CPUS (target_cpus())
@@ -23,14 +23,14 @@ static inline unsigned long target_cpus(
 
 #define APIC_BROADCAST_ID      0x0F
 
-static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) 
-{ 
-	return (bitmap & (1UL << apicid)); 
-} 
+static inline unsigned long check_apicid_used(cpumask_const_t bitmap, int apicid)
+{
+	return cpu_isset_const(apicid, bitmap);
+}
 
-static inline unsigned long check_apicid_present(int bit) 
+static inline unsigned long check_apicid_present(int bit)
 {
-	return (phys_cpu_present_map & (1UL << bit));
+	return cpu_isset(bit, phys_cpu_present_map);
 }
 
 /*
@@ -50,9 +50,11 @@ static inline void init_apic_ldr(void)
 	apic_write_around(APIC_LDR, val);
 }
 
-static inline unsigned long ioapic_phys_id_map(unsigned long phys_map)
+static inline cpumask_t ioapic_phys_id_map(cpumask_const_t phys_map)
 {
-	return phys_map;
+	cpumask_t ret;
+	cpus_copy_const(ret, phys_map);
+	return ret;
 }
 
 static inline void clustered_apic_check(void)
@@ -82,9 +84,9 @@ static inline int cpu_present_to_apicid(
 	return  mps_cpu;
 }
 
-static inline unsigned long apicid_to_cpu_present(int phys_apicid)
+static inline cpumask_t apicid_to_cpu_present(int phys_apicid)
 {
-	return (1ul << phys_apicid);
+	return cpumask_of_cpu(phys_apicid);
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, 
@@ -104,18 +106,17 @@ static inline void setup_portio_remap(vo
 
 static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 {
-	return test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map);
+	return cpu_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
 }
 
 static inline int apic_id_registered(void)
 {
-	return (test_bit(GET_APIC_ID(apic_read(APIC_ID)), 
-						&phys_cpu_present_map));
+	return cpu_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
 {
-	return cpumask;
+	return cpus_coerce_const(cpumask);
 }
 
 static inline void enable_apic_mode(void)
diff -puN include/asm-i386/mach-default/mach_ipi.h~cpumask_t-1 include/asm-i386/mach-default/mach_ipi.h
--- 25/include/asm-i386/mach-default/mach_ipi.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-default/mach_ipi.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,10 +1,10 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-inline void send_IPI_mask_bitmask(int mask, int vector);
+inline void send_IPI_mask_bitmask(cpumask_t mask, int vector);
 inline void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
diff -puN include/asm-i386/mach-es7000/mach_apic.h~cpumask_t-1 include/asm-i386/mach-es7000/mach_apic.h
--- 25/include/asm-i386/mach-es7000/mach_apic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-es7000/mach_apic.h	2003-07-03 01:19:48.000000000 -0700
@@ -11,12 +11,12 @@ static inline int apic_id_registered(voi
 	        return (1);
 }
 
-static inline unsigned long target_cpus(void)
+static inline cpumask_t target_cpus(void)
 { 
 #if defined CONFIG_ES7000_CLUSTERED_APIC
-	return (0xff);
+	cpumask_t tmp = CPU_MASK_ALL;
+	return tmp;
 #else
-	return (bios_cpu_apicid[smp_processor_id()]);
+	return cpumask_of_cpu(bios_cpu_apicid[smp_processor_id()]);
 #endif
 }
 #define TARGET_CPUS	(target_cpus())
@@ -40,13 +40,13 @@ static inline unsigned long target_cpus(
 
 #define APIC_BROADCAST_ID	(0xff)
 
-static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) 
+static inline unsigned long check_apicid_used(cpumask_const_t bitmap, int apicid)
 { 
 	return 0;
 } 
 static inline unsigned long check_apicid_present(int bit) 
 {
-	return (phys_cpu_present_map & (1 << bit));
+	return cpu_isset(bit, phys_cpu_present_map);
 }
 
 #define apicid_cluster(apicid) (apicid & 0xF0)
@@ -88,7 +88,7 @@ static inline void clustered_apic_check(
 	int apic = bios_cpu_apicid[smp_processor_id()];
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ? 
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, TARGET_CPUS);
+		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_coerce(TARGET_CPUS));
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -110,10 +110,13 @@ static inline int cpu_present_to_apicid(
 		return (int) bios_cpu_apicid[mps_cpu];
 }
 
-static inline unsigned long apicid_to_cpu_present(int phys_apicid)
+static inline cpumask_t apicid_to_cpu_present(int phys_apicid)
 {
 	static int cpu = 0;
-	return (1ul << cpu++);
+	cpumask_t mask;
+	mask = cpumask_of_cpu(cpu);
+	++cpu;
+	return mask;
 }
 
 extern volatile u8 cpu_2_logical_apicid[];
@@ -123,7 +126,7 @@ static inline int cpu_to_logical_apicid(
        return (int)cpu_2_logical_apicid[cpu];
 }
 
-static inline int mpc_apic_id(struct mpc_config_processor *m, int quad)
+static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
 {
 	printk("Processor #%d %ld:%ld APIC version %d\n",
 	        m->mpc_apicid,
@@ -133,10 +136,10 @@ static inline int mpc_apic_id(struct mpc
 	return (m->mpc_apicid);
 }
 
-static inline ulong ioapic_phys_id_map(ulong phys_map)
+static inline cpumask_t ioapic_phys_id_map(cpumask_const_t phys_map)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
-	return (0xff);
+	return cpus_promote(0xff);
 }
 
 
@@ -151,32 +154,30 @@ static inline int check_phys_apicid_pres
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;	
 
-	if (cpumask == TARGET_CPUS)
-		return cpumask;
-	num_bits_set = hweight32(cpumask); 
+	num_bits_set = cpus_weight_const(cpumask);
 	/* Return id to all */
-	if (num_bits_set == 32)
-		return TARGET_CPUS;
+	if (num_bits_set == NR_CPUS)
+		return 0xFF;
 	/* 
 	 * The cpus in the mask must all be on the same APIC cluster.  If they
 	 * are not, return the default value of TARGET_CPUS.
 	 */
-	cpu = ffs(cpumask)-1;
+	cpu = first_cpu_const(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpumask & (1 << cpu)) {
+		if (cpu_isset_const(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) != 
 					apicid_cluster(new_apicid)){
 				printk ("%s: Not a valid mask!\n",__FUNCTION__);
-				return TARGET_CPUS;
+				return 0xFF;
 			}
 			apicid = new_apicid;
 			cpus_found++;
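Note: the cluster check above visits every CPU set in the mask; with
cpumask_t the open-coded ffs()/shift arithmetic becomes accessor calls.  A
generic walk over the set bits, assuming the first_cpu()/next_cpu() helpers
from the generic headers (visit() is a hypothetical per-cpu action):

	int cpu;

	for (cpu = first_cpu(mask); cpu < NR_CPUS; cpu = next_cpu(cpu, mask))
		visit(cpu);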
diff -puN include/asm-i386/mach-es7000/mach_ipi.h~cpumask_t-1 include/asm-i386/mach-es7000/mach_ipi.h
--- 25/include/asm-i386/mach-es7000/mach_ipi.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-es7000/mach_ipi.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,18 +1,19 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-static inline void send_IPI_mask_sequence(int mask, int vector);
+static inline void send_IPI_mask_sequence(cpumask_t mask, int vector);
 
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	unsigned long mask = cpu_online_map & ~(1 << smp_processor_id());
-
-	if (mask)
+	cpumask_t mask = cpumask_of_cpu(smp_processor_id());
+	cpus_complement(mask);
+	cpus_and(mask, mask, cpu_online_map);
+	if (!cpus_empty(mask))
 		send_IPI_mask(mask, vector);
 }
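Note: two equivalent idioms for "every online CPU except myself" appear in
this patch.  The es7000 variant above complements a single-CPU mask and
intersects it with cpu_online_map; the numaq/summit variants below copy the
online map and clear one bit.  Side by side, as a fragment:

	cpumask_t a = cpumask_of_cpu(smp_processor_id());
	cpus_complement(a);			/* everything but me ... */
	cpus_and(a, a, cpu_online_map);		/* ... that is online */

	cpumask_t b = cpu_online_map;		/* everyone online ... */
	cpu_clear(smp_processor_id(), b);	/* ... minus me */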
 
diff -puN include/asm-i386/mach-numaq/mach_apic.h~cpumask_t-1 include/asm-i386/mach-numaq/mach_apic.h
--- 25/include/asm-i386/mach-numaq/mach_apic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-numaq/mach_apic.h	2003-07-03 01:19:48.000000000 -0700
@@ -6,7 +6,13 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-#define TARGET_CPUS (~0UL)
+static inline cpumask_t target_cpus(void)
+{
+	cpumask_t tmp = CPU_MASK_ALL;
+	return tmp;
+}
+
+#define TARGET_CPUS (target_cpus())
 
 #define NO_BALANCE_IRQ (1)
 #define esr_disable (1)
@@ -15,13 +21,13 @@
 #define INT_DEST_MODE 0     /* physical delivery on LOCAL quad */
  
 #define APIC_BROADCAST_ID      0x0F
-#define check_apicid_used(bitmap, apicid) ((bitmap) & (1 << (apicid)))
-#define check_apicid_present(bit) (phys_cpu_present_map & (1 << bit))
+#define check_apicid_used(bitmap, apicid) cpu_isset_const(apicid, bitmap)
+#define check_apicid_present(bit) cpu_isset(bit, phys_cpu_present_map)
 #define apicid_cluster(apicid) (apicid & 0xF0)
 
 static inline int apic_id_registered(void)
 {
-	return (1);
+	return 1;
 }
 
 static inline void init_apic_ldr(void)
@@ -41,13 +47,13 @@ static inline void clustered_apic_check(
  */
 static inline int multi_timer_check(int apic, int irq)
 {
-	return (apic != 0 && irq == 0);
+	return apic != 0 && irq == 0;
 }
 
-static inline ulong ioapic_phys_id_map(ulong phys_map)
+static inline cpumask_t ioapic_phys_id_map(cpumask_const_t phys_map)
 {
 	/* We don't have a good way to do this yet - hack */
-	return 0xf;
+	return cpus_promote(0xFUL);
 }
 
 /* Mapping from cpu number to logical apicid */
@@ -59,22 +65,25 @@ static inline int cpu_to_logical_apicid(
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	return ( ((mps_cpu/4)*16) + (1<<(mps_cpu%4)) );
+	return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
 }
 
 static inline int generate_logical_apicid(int quad, int phys_apicid)
 {
-	return ( (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1) );
+	return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
 }
 
 static inline int apicid_to_node(int logical_apicid) 
 {
-	return (logical_apicid >> 4);
+	return logical_apicid >> 4;
 }
 
-static inline unsigned long apicid_to_cpu_present(int logical_apicid)
+static inline cpumask_t apicid_to_cpu_present(int logical_apicid)
 {
-	return ( (logical_apicid&0xf) << (4*apicid_to_node(logical_apicid)) );
+	int node = apicid_to_node(logical_apicid);
+	int cpu = __ffs(logical_apicid & 0xf);
+
+	return cpumask_of_cpu(cpu + 4*node);
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, 
@@ -115,7 +124,7 @@ static inline void enable_apic_mode(void
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
 {
 	return (int) 0xF;
 }
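Note: the NUMA-Q logical APIC ID packs the quad number into the high nibble
and a one-hot CPU-within-quad bit into the low nibble, so the rewritten
shift form above is bit-for-bit the old divide/modulo form.  Worked example
for mps_cpu = 5: quad = 5 >> 2 = 1, low nibble = 1 << (5 & 3) = 0x2, giving
apicid (1 << 4) | 0x2 = 0x12 -- exactly (5/4)*16 + (1<<(5%4)).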
diff -puN include/asm-i386/mach-numaq/mach_ipi.h~cpumask_t-1 include/asm-i386/mach-numaq/mach_ipi.h
--- 25/include/asm-i386/mach-numaq/mach_ipi.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-numaq/mach_ipi.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,18 +1,19 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-static inline void send_IPI_mask_sequence(int mask, int vector);
+static inline void send_IPI_mask_sequence(cpumask_t mask, int vector);
 
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	unsigned long mask = cpu_online_map & ~(1 << smp_processor_id());
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
 
-	if (mask)
+	if (!cpus_empty(mask))
 		send_IPI_mask(mask, vector);
 }
 
diff -puN include/asm-i386/mach-summit/mach_apic.h~cpumask_t-1 include/asm-i386/mach-summit/mach_apic.h
--- 25/include/asm-i386/mach-summit/mach_apic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-summit/mach_apic.h	2003-07-03 01:19:48.000000000 -0700
@@ -18,9 +18,10 @@ static inline unsigned long xapic_phys_t
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline unsigned long target_cpus(void)
+static inline cpumask_t target_cpus(void)
 {
-	return (~0UL);
+	cpumask_t tmp = CPU_MASK_ALL;
+	return tmp;
 } 
 #define TARGET_CPUS	(target_cpus())
 
@@ -28,7 +29,7 @@ static inline unsigned long target_cpus(
 #define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
 
 #define APIC_BROADCAST_ID     (0x0F)
-static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) 
+static inline unsigned long check_apicid_used(cpumask_const_t bitmap, int apicid)
 {
 	return 0;
 } 
@@ -72,7 +73,7 @@ static inline void clustered_apic_check(
 
 static inline int apicid_to_node(int logical_apicid)
 {
-	return (logical_apicid >> 5);          /* 2 clusterids per CEC */
+	return logical_apicid >> 5;          /* 2 clusterids per CEC */
 }
 
 /* Mapping from cpu number to logical apicid */
@@ -87,15 +88,15 @@ static inline int cpu_present_to_apicid(
 	return (int) bios_cpu_apicid[mps_cpu];
 }
 
-static inline ulong ioapic_phys_id_map(ulong phys_map)
+static inline cpumask_t ioapic_phys_id_map(cpumask_const_t phys_id_map)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
-	return 0x0F;
+	return cpus_promote(0x0F);
 }
 
-static inline unsigned long apicid_to_cpu_present(int apicid)
+static inline cpumask_t apicid_to_cpu_present(int apicid)
 {
-	return 1;
+	return cpumask_of_cpu(0);
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, 
@@ -122,25 +123,25 @@ static inline void enable_apic_mode(void
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;	
 
-	num_bits_set = hweight32(cpumask); 
+	num_bits_set = cpus_weight_const(cpumask);
 	/* Return id to all */
-	if (num_bits_set == 32)
+	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
 	/* 
 	 * The cpus in the mask must all be on the same APIC cluster.  If they
 	 * are not, return the default value of TARGET_CPUS.
 	 */
-	cpu = ffs(cpumask)-1;
+	cpu = first_cpu_const(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpumask & (1 << cpu)) {
+		if (cpu_isset_const(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) != 
 					apicid_cluster(new_apicid)){
diff -puN include/asm-i386/mach-summit/mach_ipi.h~cpumask_t-1 include/asm-i386/mach-summit/mach_ipi.h
--- 25/include/asm-i386/mach-summit/mach_ipi.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-summit/mach_ipi.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,18 +1,19 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-inline void send_IPI_mask_sequence(int mask, int vector);
+inline void send_IPI_mask_sequence(cpumask_t mask, int vector);
 
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	unsigned long mask = cpu_online_map & ~(1 << smp_processor_id());
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
 
-	if (mask)
+	if (!cpus_empty(mask))
 		send_IPI_mask(mask, vector);
 }
 
diff -puN include/asm-i386/mach-visws/mach_apic.h~cpumask_t-1 include/asm-i386/mach-visws/mach_apic.h
--- 25/include/asm-i386/mach-visws/mach_apic.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mach-visws/mach_apic.h	2003-07-03 01:19:48.000000000 -0700
@@ -12,17 +12,16 @@
 #ifdef CONFIG_SMP
  #define TARGET_CPUS cpu_online_map
 #else
- #define TARGET_CPUS 0x01
+ #define TARGET_CPUS cpumask_of_cpu(0)
 #endif
 
 #define APIC_BROADCAST_ID      0x0F
-#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid))
-#define check_apicid_present(bit) (phys_cpu_present_map & (1 << bit))
+#define check_apicid_used(bitmap, apicid)	cpu_isset_const(apicid, bitmap)
+#define check_apicid_present(bit)		cpu_isset(bit, phys_cpu_present_map)
 
 static inline int apic_id_registered(void)
 {
-	return (test_bit(GET_APIC_ID(apic_read(APIC_ID)), 
-						&phys_cpu_present_map));
+	return cpu_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
 }
 
 /*
@@ -61,9 +60,9 @@ static inline int cpu_present_to_apicid(
 	return mps_cpu;
 }
 
-static inline unsigned long apicid_to_cpu_present(int apicid)
+static inline cpumask_t apicid_to_cpu_present(int apicid)
 {
-	return (1ul << apicid);
+	return cpumask_of_cpu(apicid);
 }
 
 #define WAKE_SECONDARY_VIA_INIT
@@ -78,11 +77,11 @@ static inline void enable_apic_mode(void
 
 static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 {
-	return test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map);
+	return cpu_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
 {
-	return cpumask;
+	return cpus_coerce_const(cpumask);
 }
 #endif /* __ASM_MACH_APIC_H */
diff -puN include/asm-i386/mmu_context.h~cpumask_t-1 include/asm-i386/mmu_context.h
--- 25/include/asm-i386/mmu_context.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mmu_context.h	2003-07-03 01:19:48.000000000 -0700
@@ -26,12 +26,12 @@ static inline void switch_mm(struct mm_s
 {
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		clear_bit(cpu, &prev->cpu_vm_mask);
+		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
 		cpu_tlbstate[cpu].state = TLBSTATE_OK;
 		cpu_tlbstate[cpu].active_mm = next;
 #endif
-		set_bit(cpu, &next->cpu_vm_mask);
+		cpu_set(cpu, next->cpu_vm_mask);
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -47,7 +47,7 @@ static inline void switch_mm(struct mm_s
 		cpu_tlbstate[cpu].state = TLBSTATE_OK;
 		BUG_ON(cpu_tlbstate[cpu].active_mm != next);
 
-		if (!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled 
 			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
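Note: the switch_mm() conversion is mechanical -- each flat-word bitop maps
onto a cpumask accessor operating on the embedded array.  Plausible
definitions, assuming the asm-generic/cpumask_array.h implementation this
series introduces (not shown in this patch):

	#define cpu_set(cpu, dst)		set_bit(cpu, (dst).mask)
	#define cpu_clear(cpu, dst)		clear_bit(cpu, (dst).mask)
	#define cpu_test_and_set(cpu, dst)	test_and_set_bit(cpu, (dst).mask)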
diff -puN include/asm-i386/mpspec.h~cpumask_t-1 include/asm-i386/mpspec.h
--- 25/include/asm-i386/mpspec.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/mpspec.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,6 +1,7 @@
 #ifndef __ASM_MPSPEC_H
 #define __ASM_MPSPEC_H
 
+#include <linux/cpumask.h>
 #include <asm/mpspec_def.h>
 #include <mach_mpspec.h>
 
@@ -11,7 +12,7 @@ extern int quad_local_to_mp_bus_id [NR_C
 extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
 
 extern unsigned int boot_cpu_physical_apicid;
-extern unsigned long phys_cpu_present_map;
+extern cpumask_t phys_cpu_present_map;
 extern int smp_found_config;
 extern void find_smp_config (void);
 extern void get_smp_config (void);
diff -puN include/asm-i386/numaq.h~cpumask_t-1 include/asm-i386/numaq.h
--- 25/include/asm-i386/numaq.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/numaq.h	2003-07-03 01:19:48.000000000 -0700
@@ -28,7 +28,7 @@
 
 #ifdef CONFIG_X86_NUMAQ
 
-#define MAX_NUMNODES		8
+#define MAX_NUMNODES		16
 extern void get_memcfg_numaq(void);
 #define get_memcfg_numa() get_memcfg_numaq()
 
@@ -159,7 +159,7 @@ struct sys_cfg_data {
 
 static inline unsigned long *get_zholes_size(int nid)
 {
-	return 0;
+	return NULL;
 }
 #endif /* CONFIG_X86_NUMAQ */
 #endif /* NUMAQ_H */
diff -puN include/asm-i386/smp.h~cpumask_t-1 include/asm-i386/smp.h
--- 25/include/asm-i386/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -8,6 +8,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -31,9 +32,7 @@
  */
  
 extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
-extern volatile unsigned long smp_invalidate_needed;
+extern cpumask_t phys_cpu_present_map;
 extern int pic_mode;
 extern int smp_num_siblings;
 extern int cpu_sibling_map[];
@@ -54,37 +53,19 @@ extern void zap_low_mappings (void);
  */
 #define smp_processor_id() (current_thread_info()->cpu)
 
-extern volatile unsigned long cpu_callout_map;
+extern volatile cpumask_t cpu_callout_map;
 
-#define cpu_possible(cpu) (cpu_callout_map & (1<<(cpu)))
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-
-#define for_each_cpu(cpu, mask) \
-	for(mask = cpu_online_map; \
-	    cpu = __ffs(mask), mask != 0; \
-	    mask &= ~(1<<cpu))
-
-extern inline unsigned int num_online_cpus(void)
-{
-	return hweight32(cpu_online_map);
-}
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_callout_map)
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
 {
-	return hweight32(cpu_callout_map);
+	return cpus_weight(cpu_callout_map);
 }
 
 extern void map_cpu_to_logical_apicid(void);
 extern void unmap_cpu_to_logical_apicid(int cpu);
 
-extern inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS;
-}
 #ifdef CONFIG_X86_LOCAL_APIC
 
 #ifdef APIC_DEFINITION
diff -puN include/asm-i386/topology.h~cpumask_t-1 include/asm-i386/topology.h
--- 25/include/asm-i386/topology.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-i386/topology.h	2003-07-03 01:19:48.000000000 -0700
@@ -31,9 +31,11 @@
 
 #include <asm/mpspec.h>
 
+#include <linux/cpumask.h>
+
 /* Mappings between logical cpu number and node number */
-extern volatile unsigned long node_2_cpu_mask[];
-extern volatile int cpu_2_node[];
+extern cpumask_t node_2_cpu_mask[];
+extern int cpu_2_node[];
 
 /* Returns the number of the node containing CPU 'cpu' */
 static inline int cpu_to_node(int cpu)
@@ -49,7 +51,7 @@ static inline int cpu_to_node(int cpu)
 #define parent_node(node) (node)
 
 /* Returns a bitmask of CPUs on Node 'node'. */
-static inline unsigned long node_to_cpumask(int node)
+static inline cpumask_t node_to_cpumask(int node)
 {
 	return node_2_cpu_mask[node];
 }
@@ -57,14 +59,15 @@ static inline unsigned long node_to_cpum
 /* Returns the number of the first CPU on Node 'node'. */
 static inline int node_to_first_cpu(int node)
 { 
-	return __ffs(node_to_cpumask(node));
+	cpumask_t mask = node_to_cpumask(node);
+	return first_cpu(mask);
 }
 
 /* Returns the number of the first MemBlk on Node 'node' */
 #define node_to_memblk(node) (node)
 
 /* Returns the number of the node containing PCI bus 'bus' */
-static inline unsigned long pcibus_to_cpumask(int bus)
+static inline cpumask_t pcibus_to_cpumask(int bus)
 {
 	return node_to_cpumask(mp_bus_id_to_node[bus]);
 }
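Note: node_to_first_cpu() grows a local above because a struct cannot feed
__ffs() directly; first_cpu() wants a real cpumask_t object to inspect.  The
same pattern, as a general rule for callers of mask-returning helpers:

	cpumask_t mask = node_to_cpumask(node);	/* materialize the temporary */
	int cpu = first_cpu(mask);		/* then query it */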
diff -puN include/asm-ia64/bitops.h~cpumask_t-1 include/asm-ia64/bitops.h
--- 25/include/asm-ia64/bitops.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-ia64/bitops.h	2003-07-03 01:19:48.000000000 -0700
@@ -409,7 +409,7 @@ found_middle:
  * Find next bit in a bitmap reasonably efficiently..
  */
 static inline int
-find_next_bit (void *addr, unsigned long size, unsigned long offset)
+find_next_bit(const void *addr, unsigned long size, unsigned long offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
 	unsigned long result = offset & ~63UL;
diff -puN include/asm-ia64/smp.h~cpumask_t-1 include/asm-ia64/smp.h
--- 25/include/asm-ia64/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-ia64/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 #include <linux/kernel.h>
+#include <linux/cpumask.h>
 
 #include <asm/bitops.h>
 #include <asm/io.h>
@@ -37,8 +38,8 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __initdata;
 
-extern unsigned long phys_cpu_present_map;
-extern volatile unsigned long cpu_online_map;
+extern cpumask_t phys_cpu_present_map;
+extern cpumask_t cpu_online_map;
 extern unsigned long ipi_base_addr;
 extern unsigned char smp_int_redirect;
 
@@ -47,22 +48,7 @@ extern volatile int ia64_cpu_to_sapicid[
 
 extern unsigned long ap_wakeup_vector;
 
-#define cpu_possible(cpu)	(phys_cpu_present_map & (1UL << (cpu)))
-#define cpu_online(cpu)		(cpu_online_map & (1UL << (cpu)))
-
-static inline unsigned int
-num_online_cpus (void)
-{
-	return hweight64(cpu_online_map);
-}
-
-static inline unsigned int
-any_online_cpu (unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-	return NR_CPUS;
-}
+#define cpu_possible(cpu)	cpu_isset(cpu, phys_cpu_present_map)
 
 /*
  * Function to map hard smp processor id to logical id.  Slow, so don't use this in
diff -puN include/asm-mips64/smp.h~cpumask_t-1 include/asm-mips64/smp.h
--- 25/include/asm-mips64/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-mips64/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -17,6 +17,7 @@
 
 #include <linux/bitops.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/atomic.h>
 
 #define smp_processor_id()	(current_thread_info()->cpu)
@@ -45,56 +46,17 @@ extern struct call_data_struct *call_dat
 #define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
 #define SMP_CALL_FUNCTION	0x2
 
-#if (NR_CPUS <= _MIPS_SZLONG)
-
-typedef unsigned long   cpumask_t;
-
-#define CPUMASK_CLRALL(p)	(p) = 0
-#define CPUMASK_SETB(p, bit)	(p) |= 1UL << (bit)
-#define CPUMASK_CLRB(p, bit)	(p) &= ~(1UL << (bit))
-#define CPUMASK_TSTB(p, bit)	((p) & (1UL << (bit)))
-
-#elif (NR_CPUS <= 128)
-
-/*
- * The foll should work till 128 cpus.
- */
-#define CPUMASK_SIZE		(NR_CPUS/_MIPS_SZLONG)
-#define CPUMASK_INDEX(bit)	((bit) >> 6)
-#define CPUMASK_SHFT(bit)	((bit) & 0x3f)
-
-typedef struct {
-	unsigned long	_bits[CPUMASK_SIZE];
-} cpumask_t;
-
-#define	CPUMASK_CLRALL(p)	(p)._bits[0] = 0, (p)._bits[1] = 0
-#define CPUMASK_SETB(p, bit)	(p)._bits[CPUMASK_INDEX(bit)] |= \
-					(1UL << CPUMASK_SHFT(bit))
-#define CPUMASK_CLRB(p, bit)	(p)._bits[CPUMASK_INDEX(bit)] &= \
-					~(1UL << CPUMASK_SHFT(bit))
-#define CPUMASK_TSTB(p, bit)	((p)._bits[CPUMASK_INDEX(bit)] & \
-					(1UL << CPUMASK_SHFT(bit)))
-
-#else
-#error cpumask macros only defined for 128p kernels
-#endif
-
 extern cpumask_t phys_cpu_present_map;
 extern cpumask_t cpu_online_map;
 
-#define cpu_possible(cpu) (phys_cpu_present_map & (1<<(cpu)))
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-
-extern inline unsigned int num_online_cpus(void)
-{
-	return hweight32(cpu_online_map);
-}
+#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map)
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
-extern volatile unsigned long cpu_callout_map;
+extern cpumask_t cpu_callout_map;
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
 {
-	return hweight32(cpu_callout_map);
+	return cpus_weight(cpu_callout_map);
 }
 
 #endif /* CONFIG_SMP */
diff -puN include/asm-mips/smp.h~cpumask_t-1 include/asm-mips/smp.h
--- 25/include/asm-mips/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-mips/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -17,6 +17,7 @@
 
 #include <linux/bitops.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/atomic.h>
 
 #define smp_processor_id()	(current_thread_info()->cpu)
@@ -45,56 +46,17 @@ extern struct call_data_struct *call_dat
 #define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
 #define SMP_CALL_FUNCTION	0x2
 
-#if (NR_CPUS <= _MIPS_SZLONG)
-
-typedef unsigned long   cpumask_t;
-
-#define CPUMASK_CLRALL(p)	(p) = 0
-#define CPUMASK_SETB(p, bit)	(p) |= 1UL << (bit)
-#define CPUMASK_CLRB(p, bit)	(p) &= ~(1UL << (bit))
-#define CPUMASK_TSTB(p, bit)	((p) & (1UL << (bit)))
-
-#elif (NR_CPUS <= 128)
-
-/*
- * The foll should work till 128 cpus.
- */
-#define CPUMASK_SIZE		(NR_CPUS/_MIPS_SZLONG)
-#define CPUMASK_INDEX(bit)	((bit) >> 6)
-#define CPUMASK_SHFT(bit)	((bit) & 0x3f)
-
-typedef struct {
-	unsigned long	_bits[CPUMASK_SIZE];
-} cpumask_t;
-
-#define	CPUMASK_CLRALL(p)	(p)._bits[0] = 0, (p)._bits[1] = 0
-#define CPUMASK_SETB(p, bit)	(p)._bits[CPUMASK_INDEX(bit)] |= \
-					(1UL << CPUMASK_SHFT(bit))
-#define CPUMASK_CLRB(p, bit)	(p)._bits[CPUMASK_INDEX(bit)] &= \
-					~(1UL << CPUMASK_SHFT(bit))
-#define CPUMASK_TSTB(p, bit)	((p)._bits[CPUMASK_INDEX(bit)] & \
-					(1UL << CPUMASK_SHFT(bit)))
-
-#else
-#error cpumask macros only defined for 128p kernels
-#endif
-
 extern cpumask_t phys_cpu_present_map;
 extern cpumask_t cpu_online_map;
 
-#define cpu_possible(cpu) (phys_cpu_present_map & (1<<(cpu)))
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-
-extern inline unsigned int num_online_cpus(void)
-{
-	return hweight32(cpu_online_map);
-}
+#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map)
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
-extern volatile unsigned long cpu_callout_map;
+extern cpumask_t cpu_callout_map;
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
 {
-	return hweight32(cpu_callout_map);
+	return cpus_weight(cpu_callout_map);
 }
 
 #endif /* CONFIG_SMP */
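Note: both mips and mips64 previously rolled their own conditional cpumask_t
plus CPUMASK_* macros; those die here in favour of the generic type.  The
rough correspondence, assuming the generic accessors:

	/* old mips-private form	new generic form	*/
	/* CPUMASK_CLRALL(p)	->	cpus_clear(p)		*/
	/* CPUMASK_SETB(p, b)	->	cpu_set(b, p)		*/
	/* CPUMASK_CLRB(p, b)	->	cpu_clear(b, p)		*/
	/* CPUMASK_TSTB(p, b)	->	cpu_isset(b, p)		*/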
diff -puN include/asm-parisc/smp.h~cpumask_t-1 include/asm-parisc/smp.h
--- 25/include/asm-parisc/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-parisc/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -14,9 +14,10 @@
 #ifndef ASSEMBLY
 #include <linux/bitops.h>
 #include <linux/threads.h>	/* for NR_CPUS */
+#include <linux/cpumask.h>
 typedef unsigned long address_t;
 
-extern volatile unsigned long cpu_online_map;
+extern cpumask_t cpu_online_map;
 
 
 /*
@@ -51,22 +52,10 @@ extern void smp_send_reschedule(int cpu)
-extern unsigned long cpu_present_mask;
+extern cpumask_t cpu_present_mask;
 
 #define smp_processor_id()	(current_thread_info()->cpu)
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
-#define cpu_possible(cpu)       (cpu_present_mask & (1<<(cpu)))
+#define cpu_possible(cpu)       cpu_isset(cpu, cpu_present_mask)
 
-extern inline unsigned int num_online_cpus(void)
-{
-	return hweight32(cpu_online_map);
-}
-
-extern inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS;
-}
 #endif /* CONFIG_SMP */
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
diff -puN include/asm-ppc64/mmu_context.h~cpumask_t-1 include/asm-ppc64/mmu_context.h
--- 25/include/asm-ppc64/mmu_context.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-ppc64/mmu_context.h	2003-07-03 01:19:48.000000000 -0700
@@ -143,7 +143,7 @@ switch_mm(struct mm_struct *prev, struct
 	  struct task_struct *tsk, int cpu)
 {
 	flush_stab(tsk, next);
-	set_bit(cpu, &next->cpu_vm_mask);
+	cpu_set(cpu, next->cpu_vm_mask);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
diff -puN include/asm-ppc64/smp.h~cpumask_t-1 include/asm-ppc64/smp.h
--- 25/include/asm-ppc64/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-ppc64/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -19,6 +19,7 @@
 
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <linux/kernel.h>
 
 #ifdef CONFIG_SMP
@@ -27,31 +28,13 @@
 
 #include <asm/paca.h>
 
-extern unsigned long cpu_online_map;
-
 extern void smp_message_pass(int target, int msg, unsigned long data, int wait);
 extern void smp_send_tlb_invalidate(int);
 extern void smp_send_xmon_break(int cpu);
 struct pt_regs;
 extern void smp_message_recv(int, struct pt_regs *);
 
-#define cpu_online(cpu)	test_bit((cpu), &cpu_online_map)
-
 #define cpu_possible(cpu)	paca[cpu].active
-
-static inline unsigned int num_online_cpus(void)
-{
-	return hweight64(cpu_online_map);
-}
-
-static inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS;
-}
-
 #define smp_processor_id() (get_paca()->xPacaIndex)
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
diff -puN include/asm-ppc64/tlb.h~cpumask_t-1 include/asm-ppc64/tlb.h
--- 25/include/asm-ppc64/tlb.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-ppc64/tlb.h	2003-07-03 01:19:48.000000000 -0700
@@ -49,6 +49,7 @@ static inline void __tlb_remove_tlb_entr
 	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
 	unsigned long i = batch->index;
 	pte_t pte;
+	cpumask_t local_cpumask = cpumask_of_cpu(cpu);
 
 	if (pte_val(*ptep) & _PAGE_HASHPTE) {
 		pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
@@ -61,7 +62,7 @@ static inline void __tlb_remove_tlb_entr
 			if (i == PPC64_TLB_BATCH_NR) {
 				int local = 0;
 
-				if (tlb->mm->cpu_vm_mask == (1UL << cpu))
+				if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
 					local = 1;
 
 				flush_hash_range(tlb->mm->context, i, local);
@@ -78,8 +79,9 @@ static inline void tlb_flush(struct mmu_
 	int cpu = smp_processor_id();
 	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
 	int local = 0;
+	cpumask_t local_cpumask = cpumask_of_cpu(cpu);
 
-	if (tlb->mm->cpu_vm_mask == (1UL << smp_processor_id()))
+	if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
 		local = 1;
 
 	flush_hash_range(tlb->mm->context, batch->index, local);
diff -puN include/asm-ppc/smp.h~cpumask_t-1 include/asm-ppc/smp.h
--- 25/include/asm-ppc/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-ppc/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
+#include <linux/cpumask.h>
 
 #ifdef CONFIG_SMP
 
@@ -28,8 +29,8 @@ struct cpuinfo_PPC {
 };
 
 extern struct cpuinfo_PPC cpu_data[];
-extern unsigned long cpu_online_map;
-extern unsigned long cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_possible_map;
 extern unsigned long smp_proc_in_lock[];
 extern volatile unsigned long cpu_callin_map[];
 extern int smp_tb_synchronized;
@@ -45,21 +46,8 @@ extern void smp_local_timer_interrupt(st
 
 #define smp_processor_id() (current_thread_info()->cpu)
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-#define cpu_possible(cpu) (cpu_possible_map & (1<<(cpu)))
-
-extern inline unsigned int num_online_cpus(void)
-{
-	return hweight32(cpu_online_map);
-}
-
-extern inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS;
-}
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
 
 extern int __cpu_up(unsigned int cpu);
 
diff -puN include/asm-s390/smp.h~cpumask_t-1 include/asm-s390/smp.h
--- 25/include/asm-s390/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-s390/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -11,6 +11,7 @@
 
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <linux/bitops.h>
 
 #if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
@@ -28,8 +29,8 @@ typedef struct
 	__u16      cpu;
 } sigp_info;
 
-extern volatile unsigned long cpu_online_map;
-extern volatile unsigned long cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_possible_map;
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 
@@ -47,25 +48,8 @@ extern volatile unsigned long cpu_possib
 
 #define smp_processor_id() (current_thread_info()->cpu)
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-#define cpu_possible(cpu) (cpu_possible_map & (1<<(cpu)))
-
-extern inline unsigned int num_online_cpus(void)
-{
-#ifndef __s390x__
-	return hweight32(cpu_online_map);
-#else /* __s390x__ */
-	return hweight64(cpu_online_map);
-#endif /* __s390x__ */
-}
-
-extern inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS;
-}
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
 
 extern __inline__ __u16 hard_smp_processor_id(void)
 {
diff -puN include/asm-sparc64/smp.h~cpumask_t-1 include/asm-sparc64/smp.h
--- 25/include/asm-sparc64/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-sparc64/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -8,6 +8,7 @@
 
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/asi.h>
 #include <asm/starfire.h>
 #include <asm/spitfire.h>
@@ -68,11 +69,11 @@ extern cpuinfo_sparc cpu_data[NR_CPUS];
 
 extern unsigned char boot_cpu_id;
 
-extern unsigned long phys_cpu_present_map;
-#define cpu_possible(cpu)	(phys_cpu_present_map & (1UL << (cpu)))
+extern cpumask_t phys_cpu_present_map;
+#define cpu_possible(cpu)	cpu_isset(cpu, phys_cpu_present_map)
 
-extern unsigned long cpu_online_map;
+extern cpumask_t cpu_online_map;
-#define cpu_online(cpu)		(cpu_online_map & (1UL << (cpu)))
+#define cpu_online(cpu)		cpu_isset(cpu, cpu_online_map)
 
 extern atomic_t sparc64_num_cpus_online;
 #define num_online_cpus()	(atomic_read(&sparc64_num_cpus_online))
@@ -80,13 +81,6 @@ extern atomic_t sparc64_num_cpus_online;
 extern atomic_t sparc64_num_cpus_possible;
 #define num_possible_cpus()	(atomic_read(&sparc64_num_cpus_possible))
 
-static inline unsigned int any_online_cpu(unsigned long mask)
-{
-	if ((mask &= cpu_online_map) != 0UL)
-		return __ffs(mask);
-	return NR_CPUS;
-}
-
 /*
  *	General functions that each host system must provide.
  */
diff -puN include/asm-sparc/smp.h~cpumask_t-1 include/asm-sparc/smp.h
--- 25/include/asm-sparc/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-sparc/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -8,6 +8,7 @@
 
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/head.h>
 #include <asm/btfixup.h>
 
diff -puN include/asm-um/smp.h~cpumask_t-1 include/asm-um/smp.h
--- 25/include/asm-um/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-um/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -1,13 +1,14 @@
 #ifndef __UM_SMP_H
 #define __UM_SMP_H
 
-extern unsigned long cpu_online_map;
-
 #ifdef CONFIG_SMP
 
 #include "linux/config.h"
 #include "linux/bitops.h"
 #include "asm/current.h"
+#include "linux/cpumask.h"
+
+extern cpumask_t cpu_online_map;
 
 #define smp_processor_id() (current->thread_info->cpu)
 #define cpu_logical_map(n) (n)
@@ -16,16 +17,11 @@ extern unsigned long cpu_online_map;
 extern int hard_smp_processor_id(void);
 #define NO_PROC_ID -1
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 extern int ncpus;
 #define cpu_possible(cpu) (cpu < ncpus)
 
-extern inline unsigned int num_online_cpus(void)
-{
-	return(hweight32(cpu_online_map));
-}
-
 extern inline void smp_cpus_done(unsigned int maxcpus)
 {
 }
diff -puN include/asm-x86_64/mpspec.h~cpumask_t-1 include/asm-x86_64/mpspec.h
--- 25/include/asm-x86_64/mpspec.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-x86_64/mpspec.h	2003-07-03 01:19:48.000000000 -0700
@@ -171,7 +171,7 @@ extern int quad_local_to_mp_bus_id [NR_C
 extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
 
 extern unsigned int boot_cpu_physical_apicid;
-extern unsigned long phys_cpu_present_map;
+extern cpumask_t phys_cpu_present_map;
 extern int smp_found_config;
 extern void find_smp_config (void);
 extern void get_smp_config (void);
diff -puN include/asm-x86_64/smp.h~cpumask_t-1 include/asm-x86_64/smp.h
--- 25/include/asm-x86_64/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-x86_64/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -7,6 +7,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <linux/bitops.h>
 extern int disable_apic;
 #endif
@@ -35,8 +36,8 @@ struct pt_regs;
  */
  
 extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
+extern cpumask_t phys_cpu_present_map;
+extern cpumask_t cpu_online_map;
 extern volatile unsigned long smp_invalidate_needed;
 extern int pic_mode;
 extern void smp_flush_tlb(void);
@@ -56,35 +57,17 @@ void smp_stop_cpu(void);
  * compresses data structures.
  */
 
-extern volatile unsigned long cpu_callout_map;
+extern cpumask_t cpu_callout_map;
 
-#define cpu_possible(cpu) (cpu_callout_map & (1<<(cpu)))
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-
-#define for_each_cpu(cpu, mask) \
-	for(mask = cpu_online_map; \
-	    cpu = __ffs(mask), mask != 0; \
-	    mask &= ~(1UL<<cpu))
-
-extern inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS; 
-} 
-
-extern inline unsigned int num_online_cpus(void)
-{ 
-	return hweight32(cpu_online_map);
-} 
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_callout_map)
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 static inline int num_booting_cpus(void)
 {
-	return hweight32(cpu_callout_map);
+	return cpus_weight(cpu_callout_map);
 }
 
-extern volatile unsigned long cpu_callout_map;
+extern cpumask_t cpu_callout_map;
 
 #define smp_processor_id() read_pda(cpunumber)
 
@@ -104,7 +87,7 @@ extern inline int safe_smp_processor_id(
 		return hard_smp_processor_id();
 } 
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 #endif /* !ASSEMBLY */
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
@@ -117,7 +100,6 @@ extern inline int safe_smp_processor_id(
 #ifndef CONFIG_SMP
 #define stack_smp_processor_id() 0
 #define safe_smp_processor_id() 0
-#define for_each_cpu(x) (x)=0;
 #define cpu_logical_map(x) (x)
 #else
 #include <asm/thread_info.h>
diff -puN include/asm-x86_64/topology.h~cpumask_t-1 include/asm-x86_64/topology.h
--- 25/include/asm-x86_64/topology.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/asm-x86_64/topology.h	2003-07-03 01:19:48.000000000 -0700
@@ -8,13 +8,13 @@
 /* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
 
 extern int fake_node;
-extern unsigned long cpu_online_map;
+extern cpumask_t cpu_online_map;
 
 #define cpu_to_node(cpu)		(fake_node ? 0 : (cpu))
 #define memblk_to_node(memblk) 	(fake_node ? 0 : (memblk))
 #define parent_node(node)		(node)
 #define node_to_first_cpu(node) 	(fake_node ? 0 : (node))
-#define node_to_cpu_mask(node)	(fake_node ? cpu_online_map : (1UL << (node)))
+#define node_to_cpu_mask(node)	(fake_node ? cpu_online_map : cpumask_of_cpu(node))
 #define node_to_memblk(node)		(node)
 
 #define NODE_BALANCE_RATE 30	/* CHECKME */ 
diff -puN /dev/null include/linux/bitmap.h
--- /dev/null	2002-08-30 16:31:37.000000000 -0700
+++ 25-akpm/include/linux/bitmap.h	2003-07-03 01:19:48.000000000 -0700
@@ -0,0 +1,149 @@
+#ifndef __LINUX_BITMAP_H
+#define __LINUX_BITMAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+
+static inline int bitmap_empty(const volatile unsigned long *bitmap, int bits)
+{
+	int k;
+	for (k = 0; k < bits/BITS_PER_LONG; ++k)
+		if (bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (bitmap[k] & ((1UL << (bits % BITS_PER_LONG)) - 1))
+			return 0;
+
+	return 1;
+}
+
+static inline int bitmap_full(const volatile unsigned long *bitmap, int bits)
+{
+	int k;
+	for (k = 0; k < bits/BITS_PER_LONG; ++k)
+		if (~bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (~bitmap[k] & ((1UL << (bits % BITS_PER_LONG)) - 1))
+			return 0;
+
+	return 1;
+}
+
+static inline int bitmap_equal(const volatile unsigned long *bitmap1, const volatile unsigned long *bitmap2, int bits)
+{
+	int k;
+	for (k = 0; k < bits/BITS_PER_LONG; ++k)
+		if (bitmap1[k] != bitmap2[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if ((bitmap1[k] ^ bitmap2[k]) & ((1UL << (bits % BITS_PER_LONG)) - 1))
+			return 0;
+
+	return 1;
+}
+
+static inline void bitmap_complement(volatile unsigned long *bitmap, int bits)
+{
+	int k;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		bitmap[k] = ~bitmap[k];
+}
+
+static inline void bitmap_clear(volatile unsigned long *bitmap, int bits)
+{
+	CLEAR_BITMAP((unsigned long *)bitmap, bits);
+}
+
+static inline void bitmap_fill(volatile unsigned long *bitmap, int bits)
+{
+	memset((unsigned long *)bitmap, 0xff, BITS_TO_LONGS(bits)*sizeof(unsigned long));
+}
+
+static inline void bitmap_copy(volatile unsigned long *dst, const volatile unsigned long *src, int bits)
+{
+	memcpy((unsigned long *)dst, (unsigned long *)src, BITS_TO_LONGS(bits)*sizeof(unsigned long));
+}
+
+static inline void bitmap_shift_left(volatile unsigned long *dst, const volatile unsigned long *src, int shift, int bits);
+static inline void bitmap_shift_right(volatile unsigned long *dst, const volatile unsigned long *src, int shift, int bits)
+{
+	int k;
+	DECLARE_BITMAP(__shr_tmp, bits);
+
+	bitmap_clear(__shr_tmp, bits);
+	for (k = 0; k < bits - shift; ++k)
+		if (test_bit(k + shift, src))
+			set_bit(k, __shr_tmp);
+	bitmap_copy(dst, __shr_tmp, bits);
+}
+
+static inline void bitmap_shift_left(volatile unsigned long *dst, const volatile unsigned long *src, int shift, int bits)
+{
+	int k;
+	DECLARE_BITMAP(__shl_tmp, bits);
+
+	bitmap_clear(__shl_tmp, bits);
+	for (k = bits - 1; k >= shift; --k)
+		if (test_bit(k - shift, src))
+			set_bit(k, __shl_tmp);
+	bitmap_copy(dst, __shl_tmp, bits);
+}
+
+static inline void bitmap_and(volatile unsigned long *dst, const volatile unsigned long *bitmap1, const volatile unsigned long *bitmap2, int bits)
+{
+	int k;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		dst[k] = bitmap1[k] & bitmap2[k];
+}
+
+static inline void bitmap_or(volatile unsigned long *dst, const volatile unsigned long *bitmap1, const volatile unsigned long *bitmap2, int bits)
+{
+	int k;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		dst[k] = bitmap1[k] | bitmap2[k];
+}
+
+#if BITS_PER_LONG == 32
+static inline int bitmap_weight(const volatile unsigned long *bitmap, int bits)
+{
+	int k, w = 0;
+
+	for (k = 0; k < bits/BITS_PER_LONG; ++k)
+		w += hweight32(bitmap[k]);
+
+	if (bits % BITS_PER_LONG)
+		w += hweight32(bitmap[k] & ((1UL << (bits % BITS_PER_LONG)) - 1));
+
+	return w;
+}
+#else
+static inline int bitmap_weight(const volatile unsigned long *bitmap, int bits)
+{
+	int k, w = 0;
+
+	for (k = 0; k < bits/BITS_PER_LONG; ++k)
+		w += hweight64(bitmap[k]);
+
+	if (bits % BITS_PER_LONG)
+		w += hweight64(bitmap[k] & ((1UL << (bits % BITS_PER_LONG)) - 1));
+
+	return w;
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __LINUX_BITMAP_H */
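Note: all of the bitmap.h predicates mask the trailing partial word with
((1UL << (bits % BITS_PER_LONG)) - 1), so garbage above `bits' never
influences a result.  A small worked example, assuming BITS_PER_LONG == 32:

	DECLARE_BITMAP(map, 36);	/* two 32-bit words, 36 valid bits */
	bitmap_clear(map, 36);
	set_bit(35, map);		/* last valid bit */
	/* bitmap_weight(map, 36) == 1; bits 36..63 of word 1 are masked off */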
diff -puN /dev/null include/linux/cpumask.h
--- /dev/null	2002-08-30 16:31:37.000000000 -0700
+++ 25-akpm/include/linux/cpumask.h	2003-07-03 01:19:48.000000000 -0700
@@ -0,0 +1,62 @@
+#ifndef __LINUX_CPUMASK_H
+#define __LINUX_CPUMASK_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#define CPU_ARRAY_SIZE		BITS_TO_LONGS(NR_CPUS)
+
+struct cpumask
+{
+	unsigned long mask[CPU_ARRAY_SIZE];
+};
+
+typedef struct cpumask cpumask_t;
+
+#ifdef CONFIG_SMP
+#include <asm-generic/cpumask_array.h>
+#else
+#include <asm-generic/cpumask_up.h>
+#endif
+
+#if NR_CPUS <= 4*BITS_PER_LONG
+#include <asm-generic/cpumask_const_value.h>
+#else
+#include <asm-generic/cpumask_const_reference.h>
+#endif
+
+
+#ifdef CONFIG_SMP
+
+extern cpumask_t cpu_online_map;
+
+#define num_online_cpus()		cpus_weight(cpu_online_map)
+#define cpu_online(cpu)			cpu_isset(cpu, cpu_online_map)
+#else
+#define	cpu_online_map			cpumask_of_cpu(0)
+#define num_online_cpus()		1
+#define cpu_online(cpu)			({ BUG_ON((cpu) != 0); 1; })
+#endif
+
+static inline int next_online_cpu(int cpu, cpumask_t map)
+{
+	do
+		cpu = next_cpu_const(cpu, map);
+	while (cpu < NR_CPUS && !cpu_online(cpu));
+	return cpu;
+}
+
+#define for_each_cpu(cpu, map)						\
+	for (cpu = first_cpu_const(map);				\
+		cpu < NR_CPUS;						\
+		cpu = next_cpu_const(cpu, map))
+
+#define for_each_online_cpu(cpu, map)					\
+	for (cpu = first_cpu_const(map);				\
+		cpu < NR_CPUS;						\
+		cpu = next_online_cpu(cpu, map))
+
+#endif /* __LINUX_CPUMASK_H */
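Note: a usage sketch for the new iterator.  for_each_cpu() evaluates its
mask argument through first_cpu_const()/next_cpu_const(), which in the
NR_CPUS <= 4*BITS_PER_LONG configuration take the mask by value, so a local
copy is the natural calling form:

	int cpu;
	cpumask_t map = cpu_online_map;

	for_each_cpu(cpu, map)
		printk("cpu %d is online\n", cpu);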
diff -puN include/linux/init_task.h~cpumask_t-1 include/linux/init_task.h
--- 25/include/linux/init_task.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/linux/init_task.h	2003-07-03 01:19:48.000000000 -0700
@@ -70,7 +70,7 @@
 	.prio		= MAX_PRIO-20,					\
 	.static_prio	= MAX_PRIO-20,					\
 	.policy		= SCHED_NORMAL,					\
-	.cpus_allowed	= ~0UL,						\
+	.cpus_allowed	= CPU_MASK_ALL,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
diff -puN include/linux/irq.h~cpumask_t-1 include/linux/irq.h
--- 25/include/linux/irq.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/linux/irq.h	2003-07-03 01:19:48.000000000 -0700
@@ -15,6 +15,7 @@
 
 #include <linux/cache.h>
 #include <linux/spinlock.h>
+#include <linux/cpumask.h>
 
 #include <asm/irq.h>
 #include <asm/ptrace.h>
@@ -44,7 +45,7 @@ struct hw_interrupt_type {
 	void (*disable)(unsigned int irq);
 	void (*ack)(unsigned int irq);
 	void (*end)(unsigned int irq);
-	void (*set_affinity)(unsigned int irq, unsigned long mask);
+	void (*set_affinity)(unsigned int irq, cpumask_t dest);
 };
 
 typedef struct hw_interrupt_type  hw_irq_controller;
diff -puN include/linux/node.h~cpumask_t-1 include/linux/node.h
--- 25/include/linux/node.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/linux/node.h	2003-07-03 01:19:48.000000000 -0700
@@ -20,9 +20,10 @@
 #define _LINUX_NODE_H_
 
 #include <linux/sysdev.h>
+#include <linux/cpumask.h>
 
 struct node {
-	unsigned long cpumap;	/* Bitmap of CPUs on the Node */
+	cpumask_t cpumap;	/* Bitmap of CPUs on the Node */
 	struct sys_device	sysdev;
 };
 
diff -puN include/linux/rcupdate.h~cpumask_t-1 include/linux/rcupdate.h
--- 25/include/linux/rcupdate.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/linux/rcupdate.h	2003-07-03 01:19:48.000000000 -0700
@@ -40,6 +40,7 @@
 #include <linux/spinlock.h>
 #include <linux/threads.h>
 #include <linux/percpu.h>
+#include <linux/cpumask.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -67,7 +68,7 @@ struct rcu_ctrlblk {
 	spinlock_t	mutex;		/* Guard this struct                  */
 	long		curbatch;	/* Current batch number.	      */
 	long		maxbatch;	/* Max requested batch number.        */
-	unsigned long	rcu_cpu_mask; 	/* CPUs that need to switch in order  */
+	cpumask_t	rcu_cpu_mask; 	/* CPUs that need to switch in order  */
 					/* for current batch to proceed.      */
 };
 
@@ -118,7 +119,7 @@ static inline int rcu_pending(int cpu) 
 	     rcu_batch_before(RCU_batch(cpu), rcu_ctrlblk.curbatch)) ||
 	    (list_empty(&RCU_curlist(cpu)) &&
 			 !list_empty(&RCU_nxtlist(cpu))) ||
-	    test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
+	    cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
 		return 1;
 	else
 		return 0;
diff -puN include/linux/sched.h~cpumask_t-1 include/linux/sched.h
--- 25/include/linux/sched.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/linux/sched.h	2003-07-03 01:19:48.000000000 -0700
@@ -12,6 +12,7 @@
 #include <linux/jiffies.h>
 #include <linux/rbtree.h>
 #include <linux/thread_info.h>
+#include <linux/cpumask.h>
 
 #include <asm/system.h>
 #include <asm/semaphore.h>
@@ -207,7 +208,7 @@ struct mm_struct {
 	unsigned long arg_start, arg_end, env_start, env_end;
 	unsigned long rss, total_vm, locked_vm;
 	unsigned long def_flags;
-	unsigned long cpu_vm_mask;
+	cpumask_t cpu_vm_mask;
 	unsigned long swap_address;
 
 	unsigned dumpable:1;
@@ -352,7 +353,7 @@ struct task_struct {
 #endif
 
 	unsigned long policy;
-	unsigned long cpus_allowed;
+	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
 	struct list_head tasks;
@@ -511,9 +512,9 @@ extern void __put_task_struct(struct tas
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, unsigned long new_mask)
+static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
 	return 0;
 }
diff -puN include/linux/smp.h~cpumask_t-1 include/linux/smp.h
--- 25/include/linux/smp.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/linux/smp.h	2003-07-03 01:19:48.000000000 -0700
@@ -102,9 +102,6 @@ void smp_prepare_boot_cpu(void);
 #define smp_call_function(func,info,retry,wait)	({ 0; })
 #define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
-#define cpu_online_map				1
-#define cpu_online(cpu)				({ BUG_ON((cpu) != 0); 1; })
-#define num_online_cpus()			1
 #define num_booting_cpus()			1
 #define cpu_possible(cpu)			({ BUG_ON((cpu) != 0); 1; })
 #define smp_prepare_boot_cpu()			do {} while (0)
diff -puN include/linux/topology.h~cpumask_t-1 include/linux/topology.h
--- 25/include/linux/topology.h~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/include/linux/topology.h	2003-07-03 01:19:48.000000000 -0700
@@ -27,6 +27,7 @@
 #ifndef _LINUX_TOPOLOGY_H
 #define _LINUX_TOPOLOGY_H
 
+#include <linux/cpumask.h>
 #include <linux/bitops.h>
 #include <linux/mmzone.h>
 #include <linux/smp.h>
@@ -34,7 +35,12 @@
 #include <asm/topology.h>
 
 #ifndef nr_cpus_node
-#define nr_cpus_node(node)	(hweight_long(node_to_cpumask(node)))
+#define nr_cpus_node(node)							\
+	({									\
+		cpumask_t __tmp__;						\
+		__tmp__ = node_to_cpumask(node);				\
+		cpus_weight(__tmp__);						\
+	})
 #endif
 
 static inline int __next_node_with_cpus(int node)
diff -puN kernel/fork.c~cpumask_t-1 kernel/fork.c
--- 25/kernel/fork.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/kernel/fork.c	2003-07-03 01:19:48.000000000 -0700
@@ -275,7 +275,7 @@ static inline int dup_mmap(struct mm_str
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->map_count = 0;
 	mm->rss = 0;
-	mm->cpu_vm_mask = 0;
+	cpus_clear(mm->cpu_vm_mask);
 	pprev = &mm->mmap;
 
 	/*
diff -puN kernel/module.c~cpumask_t-1 kernel/module.c
--- 25/kernel/module.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/kernel/module.c	2003-07-03 01:19:48.000000000 -0700
@@ -471,7 +471,7 @@ static int stopref(void *cpu)
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 	setscheduler(current->pid, SCHED_FIFO, &param);
 #endif
-	set_cpus_allowed(current, 1UL << (unsigned long)cpu);
+	set_cpus_allowed(current, cpumask_of_cpu((long)cpu));
 
 	/* Ack: we are alive */
 	atomic_inc(&stopref_thread_ack);
@@ -524,7 +524,7 @@ static void stopref_set_state(enum stopr
 static int stop_refcounts(void)
 {
 	unsigned int i, cpu;
-	unsigned long old_allowed;
+	cpumask_t old_allowed;
 	int ret = 0;
 
 	/* One thread per cpu.  We'll do our own. */
@@ -532,7 +532,7 @@ static int stop_refcounts(void)
 
 	/* FIXME: racy with set_cpus_allowed. */
 	old_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, 1UL << (unsigned long)cpu);
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 
 	atomic_set(&stopref_thread_ack, 0);
 	stopref_num_threads = 0;
diff -puN kernel/rcupdate.c~cpumask_t-1 kernel/rcupdate.c
--- 25/kernel/rcupdate.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/kernel/rcupdate.c	2003-07-03 01:19:48.000000000 -0700
@@ -49,7 +49,7 @@
 /* Definition for rcupdate control block. */
 struct rcu_ctrlblk rcu_ctrlblk = 
 	{ .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1, 
-	  .maxbatch = 1, .rcu_cpu_mask = 0 };
+	  .maxbatch = 1, .rcu_cpu_mask = CPU_MASK_NONE };
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
 
 /* Fake initialization required by compiler */
@@ -110,7 +110,7 @@ static void rcu_start_batch(long newbatc
 		rcu_ctrlblk.maxbatch = newbatch;
 	}
 	if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
-	    (rcu_ctrlblk.rcu_cpu_mask != 0)) {
+	    !cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
 		return;
 	}
 	rcu_ctrlblk.rcu_cpu_mask = cpu_online_map;
@@ -125,7 +125,7 @@ static void rcu_check_quiescent_state(vo
 {
 	int cpu = smp_processor_id();
 
-	if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
+	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
 		return;
 
 	/* 
@@ -141,12 +141,12 @@ static void rcu_check_quiescent_state(vo
 		return;
 
 	spin_lock(&rcu_ctrlblk.mutex);
-	if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
+	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
 		goto out_unlock;
 
-	clear_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask);
+	cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
 	RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
-	if (rcu_ctrlblk.rcu_cpu_mask != 0)
+	if (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
 		goto out_unlock;
 
 	rcu_ctrlblk.curbatch++;
diff -puN kernel/sched.c~cpumask_t-1 kernel/sched.c
--- 25/kernel/sched.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/kernel/sched.c	2003-07-03 01:19:48.000000000 -0700
@@ -489,7 +489,7 @@ repeat_lock_task:
 			 */
 			if (unlikely(sync && !task_running(rq, p) &&
 				(task_cpu(p) != smp_processor_id()) &&
-				(p->cpus_allowed & (1UL << smp_processor_id())))) {
+				cpu_isset(smp_processor_id(), p->cpus_allowed))) {
 
 				set_task_cpu(p, smp_processor_id());
 				task_rq_unlock(rq, &flags);
@@ -758,13 +758,13 @@ static inline void double_rq_unlock(runq
  */
 static void sched_migrate_task(task_t *p, int dest_cpu)
 {
-	unsigned long old_mask;
+	cpumask_t old_mask;
 
 	old_mask = p->cpus_allowed;
-	if (!(old_mask & (1UL << dest_cpu)))
+	if (!cpu_isset(dest_cpu, old_mask))
 		return;
 	/* force the process onto the specified CPU */
-	set_cpus_allowed(p, 1UL << dest_cpu);
+	set_cpus_allowed(p, cpumask_of_cpu(dest_cpu));
 
 	/* restore the cpus allowed mask */
 	set_cpus_allowed(p, old_mask);
@@ -777,7 +777,7 @@ static void sched_migrate_task(task_t *p
 static int sched_best_cpu(struct task_struct *p)
 {
 	int i, minload, load, best_cpu, node = 0;
-	unsigned long cpumask;
+	cpumask_t cpumask;
 
 	best_cpu = task_cpu(p);
 	if (cpu_rq(best_cpu)->nr_running <= 2)
@@ -801,7 +801,7 @@ static int sched_best_cpu(struct task_st
 	minload = 10000000;
 	cpumask = node_to_cpumask(node);
 	for (i = 0; i < NR_CPUS; ++i) {
-		if (!(cpumask & (1UL << i)))
+		if (!cpu_isset(i, cpumask))
 			continue;
 		if (cpu_rq(i)->nr_running < minload) {
 			best_cpu = i;
@@ -888,7 +888,7 @@ static inline unsigned int double_lock_b
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
  */
-static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, unsigned long cpumask)
+static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, cpumask_t cpumask)
 {
 	int nr_running, load, max_load, i;
 	runqueue_t *busiest, *rq_src;
@@ -923,7 +923,7 @@ static inline runqueue_t *find_busiest_q
 	busiest = NULL;
 	max_load = 1;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (!((1UL << i) & cpumask))
+		if (!cpu_isset(i, cpumask))
 			continue;
 
 		rq_src = cpu_rq(i);
@@ -995,7 +995,7 @@ static inline void pull_task(runqueue_t 
  * We call this with the current runqueue locked,
  * irqs disabled.
  */
-static void load_balance(runqueue_t *this_rq, int idle, unsigned long cpumask)
+static void load_balance(runqueue_t *this_rq, int idle, cpumask_t cpumask)
 {
 	int imbalance, idx, this_cpu = smp_processor_id();
 	runqueue_t *busiest;
@@ -1049,7 +1049,7 @@ skip_queue:
 #define CAN_MIGRATE_TASK(p,rq,this_cpu)					\
 	((!idle || (jiffies - (p)->last_run > cache_decay_ticks)) &&	\
 		!task_running(rq, p) &&					\
-			((p)->cpus_allowed & (1UL << (this_cpu))))
+			cpu_isset(this_cpu, (p)->cpus_allowed))
 
 	curr = curr->prev;
 
@@ -1092,10 +1092,10 @@ out:
 static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
 {
 	int node = find_busiest_node(cpu_to_node(this_cpu));
-	unsigned long cpumask, this_cpumask = 1UL << this_cpu;
 
 	if (node >= 0) {
-		cpumask = node_to_cpumask(node) | this_cpumask;
+		cpumask_t cpumask = node_to_cpumask(node);
+		cpu_set(this_cpu, cpumask);
 		spin_lock(&this_rq->lock);
 		load_balance(this_rq, idle, cpumask);
 		spin_unlock(&this_rq->lock);
@@ -1912,7 +1912,7 @@ out_unlock:
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
-	unsigned long new_mask;
+	cpumask_t new_mask;
 	int retval;
 	task_t *p;
 
@@ -1960,7 +1960,7 @@ asmlinkage long sys_sched_getaffinity(pi
 				      unsigned long __user *user_mask_ptr)
 {
 	unsigned int real_len;
-	unsigned long mask;
+	cpumask_t mask;
 	int retval;
 	task_t *p;
 
@@ -1976,7 +1976,7 @@ asmlinkage long sys_sched_getaffinity(pi
 		goto out_unlock;
 
 	retval = 0;
-	mask = p->cpus_allowed & cpu_online_map;
+	cpus_and(mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -2307,7 +2307,7 @@ typedef struct {
  * task must not exit() & deallocate itself prematurely.  The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(task_t *p, unsigned long new_mask)
+int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
 	unsigned long flags;
 	migration_req_t req;
@@ -2322,7 +2322,7 @@ int set_cpus_allowed(task_t *p, unsigned
 	 * Can the task run on the task's current CPU? If not then
 	 * migrate the thread off to a proper CPU.
 	 */
-	if (new_mask & (1UL << task_cpu(p))) {
+	if (cpu_isset(task_cpu(p), new_mask)) {
 		task_rq_unlock(rq, &flags);
 		return 0;
 	}
@@ -2392,7 +2392,7 @@ static int migration_thread(void * data)
 	 * migration thread on this CPU, guaranteed (we're started
 	 * serially).
 	 */
-	set_cpus_allowed(current, 1UL << cpu);
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 
 	ret = setscheduler(0, SCHED_FIFO, &param);
 
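Note: sched_migrate_task() earlier in this diff is the canonical consumer of
the new calling convention -- save the task's mask, pin to one CPU, restore.
The same save/pin/restore idiom, sketched:

	cpumask_t saved = p->cpus_allowed;

	set_cpus_allowed(p, cpumask_of_cpu(dest_cpu));	/* forces the migration */
	set_cpus_allowed(p, saved);			/* then undo the pinning */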
diff -puN kernel/softirq.c~cpumask_t-1 kernel/softirq.c
--- 25/kernel/softirq.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/kernel/softirq.c	2003-07-03 01:19:48.000000000 -0700
@@ -326,9 +326,8 @@ static int ksoftirqd(void * __bind_cpu)
 	current->flags |= PF_IOTHREAD;
 
 	/* Migrate to the right CPU */
-	set_cpus_allowed(current, 1UL << cpu);
-	if (smp_processor_id() != cpu)
-		BUG();
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	BUG_ON(smp_processor_id() != cpu);
 
 	__set_current_state(TASK_INTERRUPTIBLE);
 	mb();
diff -puN kernel/workqueue.c~cpumask_t-1 kernel/workqueue.c
--- 25/kernel/workqueue.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/kernel/workqueue.c	2003-07-03 01:19:48.000000000 -0700
@@ -176,7 +176,7 @@ static int worker_thread(void *__startup
 	cwq->thread = current;
 
 	set_user_nice(current, -10);
-	set_cpus_allowed(current, 1UL << cpu);
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 
 	complete(&startup->done);
 
diff -puN mm/vmscan.c~cpumask_t-1 mm/vmscan.c
--- 25/mm/vmscan.c~cpumask_t-1	2003-07-03 01:19:48.000000000 -0700
+++ 25-akpm/mm/vmscan.c	2003-07-03 01:19:48.000000000 -0700
@@ -956,11 +956,11 @@ int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	unsigned long cpumask;
+	cpumask_t cpumask;
 
 	daemonize("kswapd%d", pgdat->node_id);
 	cpumask = node_to_cpumask(pgdat->node_id);
-	if (cpumask)
+	if (!cpus_empty(cpumask))
 		set_cpus_allowed(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 

_