 2.6.32/4420_grsecurity-2.9-2.6.32.59-201205271952.patch | 86301 +-
 1 file changed, 86300 insertions(+), 1 deletion(-)
diff --git a/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205271952.patch b/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205271952.patch
index b54be17..dc0f735 100644
--- a/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205271952.patch
+++ b/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205271952.patch
@@ -33837,4 +33837,86303 @@ index 3a19e2d..1b8116a3 100644
 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
  #include <linux/sysctl.h>
  
- static int min_read_thresh = 8, min_wr
\ No newline at end of file
+ static int min_read_thresh = 8, min_write_thresh;
+-static int max_read_thresh = INPUT_POOL_WORDS * 32;
++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+ static int max_write_thresh = INPUT_POOL_WORDS * 32;
+ static char sysctl_bootid[16];
+ 
+@@ -1231,10 +1243,15 @@ static int proc_do_uuid(ctl_table *table, int write,
+ 	uuid = table->data;
+ 	if (!uuid) {
+ 		uuid = tmp_uuid;
+-		uuid[8] = 0;
+-	}
+-	if (uuid[8] == 0)
+ 		generate_random_uuid(uuid);
++	} else {
++		static DEFINE_SPINLOCK(bootid_spinlock);
++
++		spin_lock(&bootid_spinlock);
++		if (!uuid[8])
++			generate_random_uuid(uuid);
++		spin_unlock(&bootid_spinlock);
++	}
+ 
+ 	sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
+ 		"%02x%02x%02x%02x%02x%02x",
+diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
+index 0e29a23..0efc2c2 100644
+--- a/drivers/char/rocket.c
++++ b/drivers/char/rocket.c
+@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
+ 	struct rocket_ports tmp;
+ 	int board;
+ 
++	pax_track_stack();
++
+ 	if (!retports)
+ 		return -EFAULT;
+ 	memset(&tmp, 0, sizeof (tmp));
+diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
+index 8c262aa..4d3b058 100644
+--- a/drivers/char/sonypi.c
++++ b/drivers/char/sonypi.c
+@@ -55,6 +55,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+ #include <asm/system.h>
++#include <asm/local.h>
+ 
+ #include <linux/sonypi.h>
+ 
+@@ -491,7 +492,7 @@ static struct sonypi_device {
+ 	spinlock_t fifo_lock;
+ 	wait_queue_head_t fifo_proc_list;
+ 	struct fasync_struct *fifo_async;
+-	int open_count;
++	local_t open_count;
+ 	int model;
+ 	struct input_dev *input_jog_dev;
+ 	struct input_dev *input_key_dev;
+@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+ static int sonypi_misc_release(struct inode *inode, struct file *file)
+ {
+ 	mutex_lock(&sonypi_device.lock);
+-	sonypi_device.open_count--;
++	local_dec(&sonypi_device.open_count);
+ 	mutex_unlock(&sonypi_device.lock);
+ 	return 0;
+ }
+@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
+ 	lock_kernel();
+ 	mutex_lock(&sonypi_device.lock);
+ 	/* Flush input queue on first open */
+-	if (!sonypi_device.open_count)
++	if (!local_read(&sonypi_device.open_count))
+ 		kfifo_reset(sonypi_device.fifo);
+-	sonypi_device.open_count++;
++	local_inc(&sonypi_device.open_count);
+ 	mutex_unlock(&sonypi_device.lock);
+ 	unlock_kernel();
+ 	return 0;
+diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
+index db6dcfa..13834cb 100644
+--- a/drivers/char/stallion.c
++++ b/drivers/char/stallion.c
+@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
+ 	struct stlport stl_dummyport;
+ 	struct stlport *portp;
+ 
++	pax_track_stack();
++
+ 	if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
+ 		return -EFAULT;
+ 	portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
+diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
+index 44203ff..09a3678 100644
+--- a/drivers/char/sysrq.c
++++ b/drivers/char/sysrq.c
+@@ -591,7 +591,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
+ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
+ 				   size_t count, loff_t *ppos)
+ {
+-	if (count) {
++	if (count && capable(CAP_SYS_ADMIN)) {
+ 		char c;
+ 
+ 		if (get_user(c, buf))
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index a0789f6..cea3902 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+ 	    chip->vendor.req_complete_val)
+ 		goto out_recv;
+ 
+-	if ((status == chip->vendor.req_canceled)) {
++	if (status == chip->vendor.req_canceled) {
+ 		dev_err(chip->dev, "Operation Canceled\n");
+ 		rc = -ECANCELED;
+ 		goto out;
+@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
+ 
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 
++	pax_track_stack();
++
+ 	tpm_cmd.header.in = tpm_readpubek_header;
+ 	err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ 			"attempting to read the PUBEK");
+diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
+index bf2170f..ce8cab9 100644
+--- a/drivers/char/tpm/tpm_bios.c
++++ b/drivers/char/tpm/tpm_bios.c
+@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+ 	event = addr;
+ 
+ 	if ((event->event_type == 0 && event->event_size == 0) ||
+-	    ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
++	    (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
+ 		return NULL;
+ 
+ 	return addr;
+@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+ 		return NULL;
+ 
+ 	if ((event->event_type == 0 && event->event_size == 0) ||
+-	    ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
++	    (event->event_size >= limit - v - sizeof(struct tcpa_event)))
+ 		return NULL;
+ 
+ 	(*pos)++;
+@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+ 	int i;
+ 
+ 	for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+-		seq_putc(m, data[i]);
++		if (!seq_putc(m, data[i]))
++			return -EFAULT;
+ 
+ 	return 0;
+ }
+@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
+ 	log->bios_event_log_end = log->bios_event_log + len;
+ 
+ 	virt = acpi_os_map_memory(start, len);
++	if (!virt) {
++		kfree(log->bios_event_log);
++		log->bios_event_log = NULL;
++		return -EFAULT;
++	}
+ 
+-	memcpy(log->bios_event_log, virt, len);
++	memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
+ 
+ 	acpi_os_unmap_memory(virt, len);
+ 	return 0;
+diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+index 123cedf..6664cb4 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
+ static int tty_release(struct inode *, struct file *);
+ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ #ifdef CONFIG_COMPAT
+-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
++long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ 				unsigned long arg);
+ #else
+ #define tty_compat_ioctl NULL
+@@ -1774,6 +1774,7 @@ got_driver:
+ 
+ 		if (IS_ERR(tty)) {
+ 			mutex_unlock(&tty_mutex);
++			tty_driver_kref_put(driver);
+ 			return PTR_ERR(tty);
+ 		}
+ 	}
+@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	return retval;
+ }
+ 
++EXPORT_SYMBOL(tty_ioctl);
++
+ #ifdef CONFIG_COMPAT
+-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
++long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ 				unsigned long arg)
+ {
+ 	struct inode *inode = file->f_dentry->d_inode;
+@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ 
+ 	return retval;
+ }
++
++EXPORT_SYMBOL(tty_compat_ioctl);
+ #endif
+ 
+ /*
+@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
+ 
+ void tty_default_fops(struct file_operations *fops)
+ {
+-	*fops = tty_fops;
++	memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
+ }
+ 
+ /*
+diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
+index d814a3d..b55b9c9 100644
+--- a/drivers/char/tty_ldisc.c
++++ b/drivers/char/tty_ldisc.c
+@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
+ 	if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ 		struct tty_ldisc_ops *ldo = ld->ops;
+ 
+-		ldo->refcount--;
++		atomic_dec(&ldo->refcount);
+ 		module_put(ldo->owner);
+ 		spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ 
+@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
+ 	spin_lock_irqsave(&tty_ldisc_lock, flags);
+ 	tty_ldiscs[disc] = new_ldisc;
+ 	new_ldisc->num = disc;
+-	new_ldisc->refcount = 0;
++	atomic_set(&new_ldisc->refcount, 0);
+ 	spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ 
+ 	return ret;
+@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
+ 		return -EINVAL;
+ 
+ 	spin_lock_irqsave(&tty_ldisc_lock, flags);
+-	if (tty_ldiscs[disc]->refcount)
++	if (atomic_read(&tty_ldiscs[disc]->refcount))
+ 		ret = -EBUSY;
+ 	else
+ 		tty_ldiscs[disc] = NULL;
+@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
+ 	if (ldops) {
+ 		ret = ERR_PTR(-EAGAIN);
+ 		if (try_module_get(ldops->owner)) {
+-			ldops->refcount++;
++			atomic_inc(&ldops->refcount);
+ 			ret = ldops;
+ 		}
+ 	}
+@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&tty_ldisc_lock, flags);
+-	ldops->refcount--;
++	atomic_dec(&ldops->refcount);
+ 	module_put(ldops->owner);
+ 	spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ }
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index a035ae3..c27fe2c 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
+  * virtqueue, so we let the drivers do some boutique early-output thing. */
+ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
+ {
+-	virtio_cons.put_chars = put_chars;
++	pax_open_kernel();
++	*(void **)&virtio_cons.put_chars = put_chars;
++	pax_close_kernel();
+ 	return hvc_instantiate(0, 0, &virtio_cons);
+ }
+ 
+@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
+ 	out_vq = vqs[1];
+ 
+ 	/* Start using the new console output. */
+-	virtio_cons.get_chars = get_chars;
+-	virtio_cons.put_chars = put_chars;
+-	virtio_cons.notifier_add = notifier_add_vio;
+-	virtio_cons.notifier_del = notifier_del_vio;
+-	virtio_cons.notifier_hangup = notifier_del_vio;
++	pax_open_kernel();
++	*(void **)&virtio_cons.get_chars = get_chars;
++	*(void **)&virtio_cons.put_chars = put_chars;
++	*(void **)&virtio_cons.notifier_add = notifier_add_vio;
++	*(void **)&virtio_cons.notifier_del = notifier_del_vio;
++	*(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
++	pax_close_kernel();
+ 
+ 	/* The first argument of hvc_alloc() is the virtual console number, so
+ 	 * we use zero.  The second argument is the parameter for the
+diff --git a/drivers/char/vt.c b/drivers/char/vt.c
+index 0c80c68..53d59c1 100644
+--- a/drivers/char/vt.c
++++ b/drivers/char/vt.c
+@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
+ 
+ static void notify_write(struct vc_data *vc, unsigned int unicode)
+ {
+-	struct vt_notifier_param param = { .vc = vc, unicode = unicode };
++	struct vt_notifier_param param = { .vc = vc, .c = unicode };
+ 	atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
+ }
+ 
+diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
+index 6351a26..999af95 100644
+--- a/drivers/char/vt_ioctl.c
++++ b/drivers/char/vt_ioctl.c
+@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
+ 	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+ 		return -EFAULT;
+ 
+-	if (!capable(CAP_SYS_TTY_CONFIG))
+-		perm = 0;
+-
+ 	switch (cmd) {
+ 	case KDGKBENT:
+ 		key_map = key_maps[s];
+@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
+ 			val = (i ? K_HOLE : K_NOSUCHMAP);
+ 		return put_user(val, &user_kbe->kb_value);
+ 	case KDSKBENT:
++		if (!capable(CAP_SYS_TTY_CONFIG))
++			perm = 0;
++
+ 		if (!perm)
+ 			return -EPERM;
++
+ 		if (!i && v == K_NOSUCHMAP) {
+ 			/* deallocate map */
+ 			key_map = key_maps[s];
+@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+ 	int i, j, k;
+ 	int ret;
+ 
+-	if (!capable(CAP_SYS_TTY_CONFIG))
+-		perm = 0;
+-
+ 	kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
+ 	if (!kbs) {
+ 		ret = -ENOMEM;
+@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+ 		kfree(kbs);
+ 		return ((p && *p) ? -EOVERFLOW : 0);
+ 	case KDSKBSENT:
++		if (!capable(CAP_SYS_TTY_CONFIG))
++			perm = 0;
++
+ 		if (!perm) {
+ 			ret = -EPERM;
+ 			goto reterr;
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index c7ae026..1769c1d 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
+ 	complete(&policy->kobj_unregister);
+ }
+ 
+-static struct sysfs_ops sysfs_ops = {
++static const struct sysfs_ops sysfs_ops = {
+ 	.show	= show,
+ 	.store	= store,
+ };
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 97b0038..2056670 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
+ 	return ret;
+ }
+ 
+-static struct sysfs_ops cpuidle_sysfs_ops = {
++static const struct sysfs_ops cpuidle_sysfs_ops = {
+ 	.show = cpuidle_show,
+ 	.store = cpuidle_store,
+ };
+@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
+ 	return ret;
+ }
+ 
+-static struct sysfs_ops cpuidle_state_sysfs_ops = {
++static const struct sysfs_ops cpuidle_state_sysfs_ops = {
+ 	.show = cpuidle_state_show,
+ };
+ 
+@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
+ 	.release = cpuidle_state_sysfs_release,
+ };
+ 
+-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
++static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
+ {
+ 	kobject_put(&device->kobjs[i]->kobj);
+ 	wait_for_completion(&device->kobjs[i]->kobj_unregister);
+diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
+index 5f753fc..0377ae9 100644
+--- a/drivers/crypto/hifn_795x.c
++++ b/drivers/crypto/hifn_795x.c
+@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
+ 		0xCA, 0x34, 0x2B, 0x2E};
+ 	struct scatterlist sg;
+ 
++	pax_track_stack();
++
+ 	memset(src, 0, sizeof(src));
+ 	memset(ctx.key, 0, sizeof(ctx.key));
+ 
+diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
+index 71e6482..de8d96c 100644
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ 	struct crypto_aes_ctx gen_aes;
+ 	int cpu;
+ 
++	pax_track_stack();
++
+ 	if (key_len % 8) {
+ 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ 		return -EINVAL;
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index dcc4ab7..cc834bb 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+ 	return entry->show(&chan->common, page);
+ }
+ 
+-struct sysfs_ops ioat_sysfs_ops = {
++const struct sysfs_ops ioat_sysfs_ops = {
+ 	.show	= ioat_attr_show,
+ };
+ 
+diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
+index bbc3e78..f2db62c 100644
+--- a/drivers/dma/ioat/dma.h
++++ b/drivers/dma/ioat/dma.h
+@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+ 			   unsigned long *phys_complete);
+ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
+ void ioat_kobject_del(struct ioatdma_device *device);
+-extern struct sysfs_ops ioat_sysfs_ops;
++extern const struct sysfs_ops ioat_sysfs_ops;
+ extern struct ioat_sysfs_entry ioat_version_attr;
+ extern struct ioat_sysfs_entry ioat_cap_attr;
+ #endif /* IOATDMA_H */
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index 9908c9e..3ceb0e5 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -71,10 +71,10 @@
+ /* provide a lookup table for setting the source address in the base or
+  * extended descriptor of an xor or pq descriptor
+  */
+-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
+-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
+-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
+-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
++static const u8 xor_idx_to_desc = 0xd0;
++static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
++static const u8 pq_idx_to_desc = 0xf8;
++static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+ 
+ static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+ {
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 85c464a..afd1e73 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+  * PCI core identifies what devices are on a system during boot, and then
+  * inquiry this table to see if this driver is for a given device found.
+  */
+-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
++static const struct pci_device_id amd64_pci_table[] __devinitconst = {
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_AMD,
+ 		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
+diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
+index 2b95f1a..4f52793 100644
+--- a/drivers/edac/amd76x_edac.c
++++ b/drivers/edac/amd76x_edac.c
+@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
++static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 AMD762},
+diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
+index d205d49..74c9672 100644
+--- a/drivers/edac/e752x_edac.c
++++ b/drivers/edac/e752x_edac.c
+@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
++static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 E7520},
+diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
+index c7d11cc..c59c1ca 100644
+--- a/drivers/edac/e7xxx_edac.c
++++ b/drivers/edac/e7xxx_edac.c
+@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
++static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 E7205},
+diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
+index 5376457..5fdedbc 100644
+--- a/drivers/edac/edac_device_sysfs.c
++++ b/drivers/edac/edac_device_sysfs.c
+@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
+ }
+ 
+ /* edac_dev file operations for an 'ctl_info' */
+-static struct sysfs_ops device_ctl_info_ops = {
++static const struct sysfs_ops device_ctl_info_ops = {
+ 	.show = edac_dev_ctl_info_show,
+ 	.store = edac_dev_ctl_info_store
+ };
+@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
+ }
+ 
+ /* edac_dev file operations for an 'instance' */
+-static struct sysfs_ops device_instance_ops = {
++static const struct sysfs_ops device_instance_ops = {
+ 	.show = edac_dev_instance_show,
+ 	.store = edac_dev_instance_store
+ };
+@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
+ }
+ 
+ /* edac_dev file operations for a 'block' */
+-static struct sysfs_ops device_block_ops = {
++static const struct sysfs_ops device_block_ops = {
+ 	.show = edac_dev_block_show,
+ 	.store = edac_dev_block_store
+ };
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index e1d4ce0..88840e9 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+ 	return -EIO;
+ }
+ 
+-static struct sysfs_ops csrowfs_ops = {
++static const struct sysfs_ops csrowfs_ops = {
+ 	.show = csrowdev_show,
+ 	.store = csrowdev_store
+ };
+@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+ }
+ 
+ /* Intermediate show/store table */
+-static struct sysfs_ops mci_ops = {
++static const struct sysfs_ops mci_ops = {
+ 	.show = mcidev_show,
+ 	.store = mcidev_store
+ };
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index 422728c..d8d9c88 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1;		/* log PCI parity errors */
+ static int edac_pci_log_npe = 1;	/* log PCI non-parity error errors */
+ static int edac_pci_poll_msec = 1000;	/* one second workq period */
+ 
+-static atomic_t pci_parity_count = ATOMIC_INIT(0);
+-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
+ 
+ static struct kobject *edac_pci_top_main_kobj;
+ static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
+ }
+ 
+ /* fs_ops table */
+-static struct sysfs_ops pci_instance_ops = {
++static const struct sysfs_ops pci_instance_ops = {
+ 	.show = edac_pci_instance_show,
+ 	.store = edac_pci_instance_store
+ };
+@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
+ 	return -EIO;
+ }
+ 
+-static struct sysfs_ops edac_pci_sysfs_ops = {
++static const struct sysfs_ops edac_pci_sysfs_ops = {
+ 	.show = edac_pci_dev_show,
+ 	.store = edac_pci_dev_store
+ };
+@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ 			edac_printk(KERN_CRIT, EDAC_PCI,
+ 				"Signaled System Error on %s\n",
+ 				pci_name(dev));
+-			atomic_inc(&pci_nonparity_count);
++			atomic_inc_unchecked(&pci_nonparity_count);
+ 		}
+ 
+ 		if (status & (PCI_STATUS_PARITY)) {
+@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ 				"Master Data Parity Error on %s\n",
+ 				pci_name(dev));
+ 
+-			atomic_inc(&pci_parity_count);
++			atomic_inc_unchecked(&pci_parity_count);
+ 		}
+ 
+ 		if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ 				"Detected Parity Error on %s\n",
+ 				pci_name(dev));
+ 
+-			atomic_inc(&pci_parity_count);
++			atomic_inc_unchecked(&pci_parity_count);
+ 		}
+ 	}
+ 
+@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ 			edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ 				"Signaled System Error on %s\n",
+ 				pci_name(dev));
+-			atomic_inc(&pci_nonparity_count);
++			atomic_inc_unchecked(&pci_nonparity_count);
+ 		}
+ 
+ 		if (status & (PCI_STATUS_PARITY)) {
+@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ 				"Master Data Parity Error on "
+ 				"%s\n", pci_name(dev));
+ 
+-			atomic_inc(&pci_parity_count);
++			atomic_inc_unchecked(&pci_parity_count);
+ 		}
+ 
+ 		if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ 				"Detected Parity Error on %s\n",
+ 				pci_name(dev));
+ 
+-			atomic_inc(&pci_parity_count);
++			atomic_inc_unchecked(&pci_parity_count);
+ 		}
+ 	}
+ }
+@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
+ 	if (!check_pci_errors)
+ 		return;
+ 
+-	before_count = atomic_read(&pci_parity_count);
++	before_count = atomic_read_unchecked(&pci_parity_count);
+ 
+ 	/* scan all PCI devices looking for a Parity Error on devices and
+ 	 * bridges.
+@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
+ 	/* Only if operator has selected panic on PCI Error */
+ 	if (edac_pci_get_panic_on_pe()) {
+ 		/* If the count is different 'after' from 'before' */
+-		if (before_count != atomic_read(&pci_parity_count))
++		if (before_count != atomic_read_unchecked(&pci_parity_count))
+ 			panic("EDAC: PCI Parity Error");
+ 	}
+ }
+diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
+index 6c9a0f2..9c1cf7e 100644
+--- a/drivers/edac/i3000_edac.c
++++ b/drivers/edac/i3000_edac.c
+@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 I3000},
+diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
+index fde4db9..fe108f9 100644
+--- a/drivers/edac/i3200_edac.c
++++ b/drivers/edac/i3200_edac.c
+@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 I3200},
+diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
+index adc10a2..57d4ccf 100644
+--- a/drivers/edac/i5000_edac.c
++++ b/drivers/edac/i5000_edac.c
+@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
+  *
+  *	The "E500P" device is the first device supported.
+  */
+-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
+ 	 .driver_data = I5000P},
+ 
+diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
+index 22db05a..b2b5503 100644
+--- a/drivers/edac/i5100_edac.c
++++ b/drivers/edac/i5100_edac.c
+@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
+ 	/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
+ 	{ 0, }
+diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
+index f99d106..f050710 100644
+--- a/drivers/edac/i5400_edac.c
++++ b/drivers/edac/i5400_edac.c
+@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
+  *
+  *	The "E500P" device is the first device supported.
+  */
+-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
+ 	{0,}			/* 0 terminated list. */
+ };
+diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
+index 577760a..9ce16ce 100644
+--- a/drivers/edac/i82443bxgx_edac.c
++++ b/drivers/edac/i82443bxgx_edac.c
+@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+ 
+ EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
+ 
+-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
+diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
+index c0088ba..64a7b98 100644
+--- a/drivers/edac/i82860_edac.c
++++ b/drivers/edac/i82860_edac.c
+@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 I82860},
+diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
+index b2d83b9..a34357b 100644
+--- a/drivers/edac/i82875p_edac.c
++++ b/drivers/edac/i82875p_edac.c
+@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 I82875P},
+diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
+index 2eed3ea..87bbbd1 100644
+--- a/drivers/edac/i82975x_edac.c
++++ b/drivers/edac/i82975x_edac.c
+@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
+ 	{
+ 		PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 		I82975X
+diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
+index 9900675..78ac2b6 100644
+--- a/drivers/edac/r82600_edac.c
++++ b/drivers/edac/r82600_edac.c
+@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
++static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
+ 	 },
+diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
+index d4ec605..4cfec4e 100644
+--- a/drivers/edac/x38_edac.c
++++ b/drivers/edac/x38_edac.c
+@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
+ 	edac_mc_free(mci);
+ }
+ 
+-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
++static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
+ 	{
+ 	 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ 	 X38},
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 3fc2ceb..daf098f 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
+ 
+ void fw_core_remove_card(struct fw_card *card)
+ {
+-	struct fw_card_driver dummy_driver = dummy_driver_template;
++	fw_card_driver_no_const dummy_driver = dummy_driver_template;
+ 
+ 	card->driver->update_phy_reg(card, 4,
+ 				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 4560d8f..36db24a 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
+ 	int ret;
+ 
+ 	if ((request->channels == 0 && request->bandwidth == 0) ||
+-	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+-	    request->bandwidth < 0)
++	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
+ 		return -EINVAL;
+ 
+ 	r = kmalloc(sizeof(*r), GFP_KERNEL);
+diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
+index da628c7..cf54a2c 100644
+--- a/drivers/firewire/core-transaction.c
++++ b/drivers/firewire/core-transaction.c
+@@ -36,6 +36,7 @@
+ #include <linux/string.h>
+ #include <linux/timer.h>
+ #include <linux/types.h>
++#include <linux/sched.h>
+ 
+ #include <asm/byteorder.h>
+ 
+@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+ 	struct transaction_callback_data d;
+ 	struct fw_transaction t;
+ 
++	pax_track_stack();
++
+ 	init_completion(&d.done);
+ 	d.payload = payload;
+ 	fw_send_request(card, &t, tcode, destination_id, generation, speed,
+diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
+index 7ff6e75..a2965d9 100644
+--- a/drivers/firewire/core.h
++++ b/drivers/firewire/core.h
+@@ -86,6 +86,7 @@ struct fw_card_driver {
+ 
+ 	int (*stop_iso)(struct fw_iso_context *ctx);
+ };
++typedef struct fw_card_driver __no_const fw_card_driver_no_const;
+ 
+ void fw_card_initialize(struct fw_card *card,
+ 		const struct fw_card_driver *driver, struct device *device);
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 3a2ccb0..82fd7c4 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
+ 		}
+ 	}
+ 	else {
+-		/*
+-		 * no iounmap() for that ioremap(); it would be a no-op, but
+-		 * it's so early in setup that sucker gets confused into doing
+-		 * what it shouldn't if we actually call it.
+-		 */
+ 		p = dmi_ioremap(0xF0000, 0x10000);
+ 		if (p == NULL)
+ 			goto error;
+@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ 	if (buf == NULL)
+ 		return -1;
+ 
+-	dmi_table(buf, dmi_len, dmi_num, decode, private_data);
++	dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
+ 
+ 	iounmap(buf);
+ 	return 0;
+diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
+index 9e4f59d..110e24e 100644
+--- a/drivers/firmware/edd.c
++++ b/drivers/firmware/edd.c
+@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
+ 	return ret;
+ }
+ 
+-static struct sysfs_ops edd_attr_ops = {
++static const struct sysfs_ops edd_attr_ops = {
+ 	.show = edd_attr_show,
+ };
+ 
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index f4f709d..082f06e 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
+ 	return ret;
+ }
+ 
+-static struct sysfs_ops efivar_attr_ops = {
++static const struct sysfs_ops efivar_attr_ops = {
+ 	.show = efivar_attr_show,
+ 	.store = efivar_attr_store,
+ };
+ 
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 051d1eb..0a5d4e7 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
+ 	return ret;
+ }
+ 
+-static struct sysfs_ops ibft_attr_ops = {
++static const struct sysfs_ops ibft_attr_ops = {
+ 	.show = ibft_show_attribute,
+ };
+ 
+diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
+index 56f9234..8c58c7b 100644
+--- a/drivers/firmware/memmap.c
++++ b/drivers/firmware/memmap.c
+@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
+ 	NULL
+ };
+ 
+-static struct sysfs_ops memmap_attr_ops = {
++static const struct sysfs_ops memmap_attr_ops = {
+ 	.show = memmap_attr_show,
+ };
+ 
+diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
+index b16c9a8..2af7d3f 100644
+--- a/drivers/gpio/vr41xx_giu.c
++++ b/drivers/gpio/vr41xx_giu.c
+@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
+ 	printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ 	       maskl, pendl, maskh, pendh);
+ 
+-	atomic_inc(&irq_err_count);
++	atomic_inc_unchecked(&irq_err_count);
+ 
+ 	return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index bea6efc..3dc0f42 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ 	 */
+ 	if ((out_resp->count_modes >= mode_count) && mode_count) {
+ 		copied = 0;
+-		mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
++		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+ 		list_for_each_entry(mode, &connector->modes, head) {
+ 			drm_crtc_convert_to_umode(&u_mode, mode);
+ 			if (copy_to_user(mode_ptr + copied,
+@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ 
+ 	if ((out_resp->count_props >= props_count) && props_count) {
+ 		copied = 0;
+-		prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+-		prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
++		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
++		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
+ 		for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ 			if (connector->property_ids[i] != 0) {
+ 				if (put_user(connector->property_ids[i],
+@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ 
+ 	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ 		copied = 0;
+-		encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
++		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+ 		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ 			if (connector->encoder_ids[i] != 0) {
+ 				if (put_user(connector->encoder_ids[i],
+@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 		}
+ 
+ 		for (i = 0; i < crtc_req->count_connectors; i++) {
+-			set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
++			set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+ 			if (get_user(out_id, &set_connectors_ptr[i])) {
+ 				ret = -EFAULT;
+ 				goto out;
+@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ 	out_resp->flags = property->flags;
+ 
+ 	if ((out_resp->count_values >= value_count) && value_count) {
+-		values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
++		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+ 		for (i = 0; i < value_count; i++) {
+ 			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+ 				ret = -EFAULT;
+@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ 	if (property->flags & DRM_MODE_PROP_ENUM) {
+ 		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ 			copied = 0;
+-			enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
++			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+ 			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ 
+ 				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ 		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ 			copied = 0;
+ 			blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+-			blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
++			blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
+ 
+ 			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+ 				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
+ 	blob = obj_to_blob(obj);
+ 
+ 	if (out_resp->length == blob->length) {
+-		blob_ptr = (void *)(unsigned long)out_resp->data;
++		blob_ptr = (void __user *)(unsigned long)out_resp->data;
+ 		if (copy_to_user(blob_ptr, blob->data, blob->length)){
+ 			ret = -EFAULT;
+ 			goto done;
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 1b8745d..92fdbf6 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ 	struct drm_crtc *tmp;
+ 	int crtc_mask = 1;
+ 
+-	WARN(!crtc, "checking null crtc?");
++	BUG_ON(!crtc);
+ 
+ 	dev = crtc->dev;
+ 
+@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ 
+ 	adjusted_mode = drm_mode_duplicate(dev, mode);
+ 
++	pax_track_stack();
++
+ 	crtc->enabled = drm_helper_crtc_in_use(crtc);
+ 
+ 	if (!crtc->enabled)
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 0e27d98..dec8768 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
+ 	char *kdata = NULL;
+ 
+ 	atomic_inc(&dev->ioctl_count);
+-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++	atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
+ 	++file_priv->ioctl_count;
+ 
+ 	DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 519161e..98c840c 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+-		atomic_set(&dev->counts[i], 0);
++		atomic_set_unchecked(&dev->counts[i], 0);
+ 
+ 	dev->sigdata.lock = NULL;
+ 
+@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
+ 
+ 	retcode = drm_open_helper(inode, filp, dev);
+ 	if (!retcode) {
+-		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++		atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
+ 		spin_lock(&dev->count_lock);
+-		if (!dev->open_count++) {
++		if (local_inc_return(&dev->open_count) == 1) {
+ 			spin_unlock(&dev->count_lock);
+ 			retcode = drm_setup(dev);
+ 			goto out;
+@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
+ 
+ 	lock_kernel();
+ 
+-	DRM_DEBUG("open_count = %d\n", dev->open_count);
++	DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
+ 
+ 	if (dev->driver->preclose)
+ 		dev->driver->preclose(dev, file_priv);
+@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
+ 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ 		  task_pid_nr(current),
+ 		  (long)old_encode_dev(file_priv->minor->device),
+-		  dev->open_count);
++		  local_read(&dev->open_count));
+ 
+ 	/* Release any auth tokens that might point to this file_priv,
+ 	   (do that under the drm_global_mutex) */
+@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
+ 	 * End inline drm_release
+ 	 */
+ 
+-	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
++	atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
+ 	spin_lock(&dev->count_lock);
+-	if (!--dev->open_count) {
++	if (local_dec_and_test(&dev->open_count)) {
+ 		if (atomic_read(&dev->ioctl_count)) {
+ 			DRM_ERROR("Device busy: %d\n",
+ 				  atomic_read(&dev->ioctl_count));
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 8bf3770..79422805 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
+ 	spin_lock_init(&dev->object_name_lock);
+ 	idr_init(&dev->object_name_idr);
+ 	atomic_set(&dev->object_count, 0);
+-	atomic_set(&dev->object_memory, 0);
++	atomic_set_unchecked(&dev->object_memory, 0);
+ 	atomic_set(&dev->pin_count, 0);
+-	atomic_set(&dev->pin_memory, 0);
++	atomic_set_unchecked(&dev->pin_memory, 0);
+ 	atomic_set(&dev->gtt_count, 0);
+-	atomic_set(&dev->gtt_memory, 0);
++	atomic_set_unchecked(&dev->gtt_memory, 0);
+ 
+ 	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
+ 	if (!mm) {
+@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
+ 		goto fput;
+ 	}
+ 	atomic_inc(&dev->object_count);
+-	atomic_add(obj->size, &dev->object_memory);
++	atomic_add_unchecked(obj->size, &dev->object_memory);
+ 	return obj;
+ fput:
+ 	fput(obj->filp);
+@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
+ 
+ 	fput(obj->filp);
+ 	atomic_dec(&dev->object_count);
+-	atomic_sub(obj->size, &dev->object_memory);
++	atomic_sub_unchecked(obj->size, &dev->object_memory);
+ 	kfree(obj);
+ }
+ EXPORT_SYMBOL(drm_gem_object_free);
+diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
+index f0f6c6b..34af322 100644
+--- a/drivers/gpu/drm/drm_info.c
++++ b/drivers/gpu/drm/drm_info.c
+@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
+ 	struct drm_local_map *map;
+ 	struct drm_map_list *r_list;
+ 
+-	/* Hardcoded from _DRM_FRAME_BUFFER,
+-	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+-	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+-	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
++	static const char * const types[] = {
++		[_DRM_FRAME_BUFFER] = "FB",
++		[_DRM_REGISTERS] = "REG",
++		[_DRM_SHM] = "SHM",
++		[_DRM_AGP] = "AGP",
++		[_DRM_SCATTER_GATHER] = "SG",
++		[_DRM_CONSISTENT] = "PCI",
++		[_DRM_GEM] = "GEM" };
+ 	const char *type;
+ 	int i;
+ 
+@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
+ 		map = r_list->map;
+ 		if (!map)
+ 			continue;
+-		if (map->type < 0 || map->type > 5)
++		if (map->type >= ARRAY_SIZE(types))
+ 			type = "??";
+ 		else
+ 			type = types[map->type];
+@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
+ 	struct drm_device *dev = node->minor->dev;
+ 
+ 	seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
+-	seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
++	seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
+ 	seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
+-	seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
+-	seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
++	seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
++	seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
+ 	seq_printf(m, "%d gtt total\n", dev->gtt_total);
+ 	return 0;
+ }
+@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
+ 	mutex_lock(&dev->struct_mutex);
+ 	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+ 		   atomic_read(&dev->vma_count),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++		   NULL, 0);
++#else
+ 		   high_memory, (u64)virt_to_phys(high_memory));
++#endif
+ 
+ 	list_for_each_entry(pt, &dev->vmalist, head) {
+ 		vma = pt->vma;
+@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
+ 			continue;
+ 		seq_printf(m,
+ 			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+-			   pt->pid, vma->vm_start, vma->vm_end,
++			   pt->pid,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++			   0, 0,
++#else
++			   vma->vm_start, vma->vm_end,
++#endif
+ 			   vma->vm_flags & VM_READ ? 'r' : '-',
+ 			   vma->vm_flags & VM_WRITE ? 'w' : '-',
+ 			   vma->vm_flags & VM_EXEC ? 'x' : '-',
+ 			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ 			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ 			   vma->vm_flags & VM_IO ? 'i' : '-',
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++			   0);
++#else
+ 			   vma->vm_pgoff);
++#endif
+ 
+ #if defined(__i386__)
+ 		pgprot = pgprot_val(vma->vm_page_prot);
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index 282d9fd..71e5f11 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+ 	request = compat_alloc_user_space(nbytes);
+ 	if (!access_ok(VERIFY_WRITE, request, nbytes))
+ 		return -EFAULT;
+-	list = (struct drm_buf_desc *) (request + 1);
++	list = (struct drm_buf_desc __user *) (request + 1);
+ 
+ 	if (__put_user(count, &request->count)
+ 	    || __put_user(list, &request->list))
+@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+ 	request = compat_alloc_user_space(nbytes);
+ 	if (!access_ok(VERIFY_WRITE, request, nbytes))
+ 		return -EFAULT;
+-	list = (struct drm_buf_pub *) (request + 1);
++	list = (struct drm_buf_pub __user *) (request + 1);
+ 
+ 	if (__put_user(count, &request->count)
+ 	    || __put_user(list, &request->list))
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index 9b9ff46..4ea724c 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
+ 			stats->data[i].value =
+ 			    (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+ 		else
+-			stats->data[i].value = atomic_read(&dev->counts[i]);
++			stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
+ 		stats->data[i].type = dev->types[i];
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index e2f70a5..c703e86 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 		if (drm_lock_take(&master->lock, lock->context)) {
+ 			master->lock.file_priv = file_priv;
+ 			master->lock.lock_time = jiffies;
+-			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++			atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
+ 			break;	/* Got lock */
+ 		}
+ 
+@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 		return -EINVAL;
+ 	}
+ 
+-	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++	atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
+ 
+ 	/* kernel_context_switch isn't used by any of the x86 drm
+ 	 * modules but is required by the Sparc driver.
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index 7d1d88cd..b9131b2 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
+ 				   dma->buflist[vertex->idx],
+ 				   vertex->discard, vertex->used);
+ 
+-	atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++	atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++	atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ 	sarea_priv->last_enqueue = dev_priv->counter - 1;
+ 	sarea_priv->last_dispatch = (int)hw_status[5];
+ 
+@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
+ 	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+ 			     mc->last_render);
+ 
+-	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++	atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++	atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ 	sarea_priv->last_enqueue = dev_priv->counter - 1;
+ 	sarea_priv->last_dispatch = (int)hw_status[5];
+ 
+diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
+index 21e2691..7321edd 100644
+--- a/drivers/gpu/drm/i810/i810_drv.h
++++ b/drivers/gpu/drm/i810/i810_drv.h
+@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
+ 	int page_flipping;
+ 
+ 	wait_queue_head_t irq_queue;
+-	atomic_t irq_received;
+-	atomic_t irq_emitted;
++	atomic_unchecked_t irq_received;
++	atomic_unchecked_t irq_emitted;
+ 
+ 	int front_offset;
+ } drm_i810_private_t;
+diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
+index da82afe..48a45de 100644
+--- a/drivers/gpu/drm/i830/i830_drv.h
++++ b/drivers/gpu/drm/i830/i830_drv.h
+@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
+ 	int page_flipping;
+ 
+ 	wait_queue_head_t irq_queue;
+-	atomic_t irq_received;
+-	atomic_t irq_emitted;
++	atomic_unchecked_t irq_received;
++	atomic_unchecked_t irq_emitted;
+ 
+ 	int use_mi_batchbuffer_start;
+ 
+diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
+index 91ec2bb..6f21fab 100644
+--- a/drivers/gpu/drm/i830/i830_irq.c
++++ b/drivers/gpu/drm/i830/i830_irq.c
+@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
+ 
+ 	I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
+ 
+-	atomic_inc(&dev_priv->irq_received);
++	atomic_inc_unchecked(&dev_priv->irq_received);
+ 	wake_up_interruptible(&dev_priv->irq_queue);
+ 
+ 	return IRQ_HANDLED;
+@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
+ 
+ 	DRM_DEBUG("%s\n", __func__);
+ 
+-	atomic_inc(&dev_priv->irq_emitted);
++	atomic_inc_unchecked(&dev_priv->irq_emitted);
+ 
+ 	BEGIN_LP_RING(2);
+ 	OUT_RING(0);
+ 	OUT_RING(GFX_OP_USER_INTERRUPT);
+ 	ADVANCE_LP_RING();
+ 
+-	return atomic_read(&dev_priv->irq_emitted);
++	return atomic_read_unchecked(&dev_priv->irq_emitted);
+ }
+ 
+ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
+@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
+ 
+ 	DRM_DEBUG("%s\n", __func__);
+ 
+-	if (atomic_read(&dev_priv->irq_received) >= irq_nr)
++	if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
+ 		return 0;
+ 
+ 	dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
+@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
+ 
+ 	for (;;) {
+ 		__set_current_state(TASK_INTERRUPTIBLE);
+-		if (atomic_read(&dev_priv->irq_received) >= irq_nr)
++		if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
+ 			break;
+ 		if ((signed)(end - jiffies) <= 0) {
+ 			DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
+@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
+ 	I830_WRITE16(I830REG_HWSTAM, 0xffff);
+ 	I830_WRITE16(I830REG_INT_MASK_R, 0x0);
+ 	I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
+-	atomic_set(&dev_priv->irq_received, 0);
+-	atomic_set(&dev_priv->irq_emitted, 0);
++	atomic_set_unchecked(&dev_priv->irq_received, 0);
++	atomic_set_unchecked(&dev_priv->irq_emitted, 0);
+ 	init_waitqueue_head(&dev_priv->irq_queue);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
+index 288fc50..c6092055 100644
+--- a/drivers/gpu/drm/i915/dvo.h
++++ b/drivers/gpu/drm/i915/dvo.h
+@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
+ 	 *
+ 	 * \return singly-linked list of modes or NULL if no modes found.
+ 	 */
+-	struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
++	struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
+ 
+ 	/**
+ 	 * Clean up driver-specific bits of the output
+ 	 */
+-	void (*destroy) (struct intel_dvo_device *dvo);
++	void (* const destroy) (struct intel_dvo_device *dvo);
+ 
+ 	/**
+ 	 * Debugging hook to dump device registers to log file
+ 	 */
+-	void (*dump_regs)(struct intel_dvo_device *dvo);
++	void (* const dump_regs)(struct intel_dvo_device *dvo);
+ };
+ 
+-extern struct intel_dvo_dev_ops sil164_ops;
+-extern struct intel_dvo_dev_ops ch7xxx_ops;
+-extern struct intel_dvo_dev_ops ivch_ops;
+-extern struct intel_dvo_dev_ops tfp410_ops;
+-extern struct intel_dvo_dev_ops ch7017_ops;
++extern const struct intel_dvo_dev_ops sil164_ops;
++extern const struct intel_dvo_dev_ops ch7xxx_ops;
++extern const struct intel_dvo_dev_ops ivch_ops;
++extern const struct intel_dvo_dev_ops tfp410_ops;
++extern const struct intel_dvo_dev_ops ch7017_ops;
+ 
+ #endif /* _INTEL_DVO_H */
+diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
+index 621815b..499d82e 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7017.c
++++ b/drivers/gpu/drm/i915/dvo_ch7017.c
+@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
+ 	}
+ }
+ 
+-struct intel_dvo_dev_ops ch7017_ops = {
++const struct intel_dvo_dev_ops ch7017_ops = {
+ 	.init = ch7017_init,
+ 	.detect = ch7017_detect,
+ 	.mode_valid = ch7017_mode_valid,
+diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+index a9b8962..ac769ba 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
++++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+ 	}
+ }
+ 
+-struct intel_dvo_dev_ops ch7xxx_ops = {
++const struct intel_dvo_dev_ops ch7xxx_ops = {
+ 	.init = ch7xxx_init,
+ 	.detect = ch7xxx_detect,
+ 	.mode_valid = ch7xxx_mode_valid,
+diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
+index aa176f9..ed2930c 100644
+--- a/drivers/gpu/drm/i915/dvo_ivch.c
++++ b/drivers/gpu/drm/i915/dvo_ivch.c
+@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
+ 	}
+ }
+ 
+-struct intel_dvo_dev_ops ivch_ops= {
++const struct intel_dvo_dev_ops ivch_ops= {
+ 	.init = ivch_init,
+ 	.dpms = ivch_dpms,
+ 	.save = ivch_save,
+diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
+index e1c1f73..7dbebcf 100644
+--- a/drivers/gpu/drm/i915/dvo_sil164.c
++++ b/drivers/gpu/drm/i915/dvo_sil164.c
+@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
+ 	}
+ }
+ 
+-struct intel_dvo_dev_ops sil164_ops = {
++const struct intel_dvo_dev_ops sil164_ops = {
+ 	.init = sil164_init,
+ 	.detect = sil164_detect,
+ 	.mode_valid = sil164_mode_valid,
+diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
+index 16dce84..7e1b6f8 100644
+--- a/drivers/gpu/drm/i915/dvo_tfp410.c
++++ b/drivers/gpu/drm/i915/dvo_tfp410.c
+@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
+ 	}
+ }
+ 
+-struct intel_dvo_dev_ops tfp410_ops = {
++const struct intel_dvo_dev_ops tfp410_ops = {
+ 	.init = tfp410_init,
+ 	.detect = tfp410_detect,
+ 	.mode_valid = tfp410_mode_valid,
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 7e859d6..7d1cf2b 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ 			   I915_READ(GTIMR));
+ 	}
+ 	seq_printf(m, "Interrupts received: %d\n",
+-		   atomic_read(&dev_priv->irq_received));
++		   atomic_read_unchecked(&dev_priv->irq_received));
+ 	if (dev_priv->hw_status_page != NULL) {
+ 		seq_printf(m, "Current sequence: %d\n",
+ 			   i915_get_gem_seqno(dev));
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 5449239..7e4f68d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
+ 	return i915_resume(dev);
+ }
+ 
+-static struct vm_operations_struct i915_gem_vm_ops = {
++static const struct vm_operations_struct i915_gem_vm_ops = {
+ 	.fault = i915_gem_fault,
+ 	.open = drm_gem_vm_open,
+ 	.close = drm_gem_vm_close,
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 97163f7..c24c7c7 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
+ 	/* display clock increase/decrease */
+ 	/* pll clock increase/decrease */
+ 	/* clock gating init */
+-};
++} __no_const;
+ 
+ typedef struct drm_i915_private {
+ 	struct drm_device *dev;
+@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
+ 	int page_flipping;
+ 
+ 	wait_queue_head_t irq_queue;
+-	atomic_t irq_received;
++	atomic_unchecked_t irq_received;
+ 	/** Protects user_irq_refcount and irq_mask_reg */
+ 	spinlock_t user_irq_lock;
+ 	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 27a3074..eb3f959 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ 
+ 	args->aper_size = dev->gtt_total;
+ 	args->aper_available_size = (args->aper_size -
+-				     atomic_read(&dev->pin_memory));
++				     atomic_read_unchecked(&dev->pin_memory));
+ 
+ 	return 0;
+ }
+@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
+ 
+ 	if (obj_priv->gtt_space) {
+ 		atomic_dec(&dev->gtt_count);
+-		atomic_sub(obj->size, &dev->gtt_memory);
++		atomic_sub_unchecked(obj->size, &dev->gtt_memory);
+ 
+ 		drm_mm_put_block(obj_priv->gtt_space);
+ 		obj_priv->gtt_space = NULL;
+@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 		goto search_free;
+ 	}
+ 	atomic_inc(&dev->gtt_count);
+-	atomic_add(obj->size, &dev->gtt_memory);
++	atomic_add_unchecked(obj->size, &dev->gtt_memory);
+ 
+ 	/* Assert that the object is not currently in any GPU domain. As it
+ 	 * wasn't in the GTT, there shouldn't be any way it could have been in
+@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 			  "%d/%d gtt bytes\n",
+ 			  atomic_read(&dev->object_count),
+ 			  atomic_read(&dev->pin_count),
+-			  atomic_read(&dev->object_memory),
+-			  atomic_read(&dev->pin_memory),
+-			  atomic_read(&dev->gtt_memory),
++			  atomic_read_unchecked(&dev->object_memory),
++			  atomic_read_unchecked(&dev->pin_memory),
++			  atomic_read_unchecked(&dev->gtt_memory),
+ 			  dev->gtt_total);
+ 	}
+ 	goto err;
+@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+ 	 */
+ 	if (obj_priv->pin_count == 1) {
+ 		atomic_inc(&dev->pin_count);
+-		atomic_add(obj->size, &dev->pin_memory);
++		atomic_add_unchecked(obj->size, &dev->pin_memory);
+ 		if (!obj_priv->active &&
+ 		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
+ 		    !list_empty(&obj_priv->list))
+@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
+ 			list_move_tail(&obj_priv->list,
+ 				       &dev_priv->mm.inactive_list);
+ 		atomic_dec(&dev->pin_count);
+-		atomic_sub(obj->size, &dev->pin_memory);
++		atomic_sub_unchecked(obj->size, &dev->pin_memory);
+ 	}
+ 	i915_verify_inactive(dev, __FILE__, __LINE__);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 63f28ad..f5469da 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 	int irq_received;
+ 	int ret = IRQ_NONE;
+ 
+-	atomic_inc(&dev_priv->irq_received);
++	atomic_inc_unchecked(&dev_priv->irq_received);
+ 
+ 	if (IS_IGDNG(dev))
+ 		return igdng_irq_handler(dev);
+@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 
+-	atomic_set(&dev_priv->irq_received, 0);
++	atomic_set_unchecked(&dev_priv->irq_received, 0);
+ 
+ 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 5d9c6a7..d1b0e29 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
+ 	sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
+ 
+ 	/* Save the bit-banging i2c functionality for use by the DDC wrapper */
+-	intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
++	pax_open_kernel();
++	*(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
++	pax_close_kernel();
+ 
+ 	/* Read the regs to test if we can talk to the device */
+ 	for (i = 0; i < 0x40; i++) {
+diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
+index be6c6b9..8615d9c 100644
+--- a/drivers/gpu/drm/mga/mga_drv.h
++++ b/drivers/gpu/drm/mga/mga_drv.h
+@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
+ 	u32 clear_cmd;
+ 	u32 maccess;
+ 
+-	atomic_t vbl_received;		/**< Number of vblanks received. */
++	atomic_unchecked_t vbl_received;	/**< Number of vblanks received. */
+ 	wait_queue_head_t fence_queue;
+-	atomic_t last_fence_retired;
++	atomic_unchecked_t last_fence_retired;
+ 	u32 next_fence_to_post;
+ 
+ 	unsigned int fb_cpp;
+diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
+index daa6041..a28a5da 100644
+--- a/drivers/gpu/drm/mga/mga_irq.c
++++ b/drivers/gpu/drm/mga/mga_irq.c
+@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+ 	if (crtc != 0)
+ 		return 0;
+ 
+-	return atomic_read(&dev_priv->vbl_received);
++	return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+ 
+ 
+@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+ 	/* VBLANK interrupt */
+ 	if (status & MGA_VLINEPEN) {
+ 		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+-		atomic_inc(&dev_priv->vbl_received);
++		atomic_inc_unchecked(&dev_priv->vbl_received);
+ 		drm_handle_vblank(dev, 0);
+ 		handled = 1;
+ 	}
+@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+ 			MGA_WRITE(MGA_PRIMEND, prim_end);
+ 		}
+ 
+-		atomic_inc(&dev_priv->last_fence_retired);
++		atomic_inc_unchecked(&dev_priv->last_fence_retired);
+ 		DRM_WAKEUP(&dev_priv->fence_queue);
+ 		handled = 1;
+ 	}
+@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
+ 	 * using fences.
+ 	 */
+ 	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+-		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++		    (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
+ 		      - *sequence) <= (1 << 23)));
+ 
+ 	*sequence = cur_fence;
+diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
+index 4c39a40..b22a9ea 100644
+--- a/drivers/gpu/drm/r128/r128_cce.c
++++ b/drivers/gpu/drm/r128/r128_cce.c
+@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
+ 
+ 	/* GH: Simple idle check.
+ */ +- atomic_set(&dev_priv->idle_count, 0); ++ atomic_set_unchecked(&dev_priv->idle_count, 0); + + /* We don't support anything other than bus-mastering ring mode, + * but the ring can be in either AGP or PCI space for the ring +diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h +index 3c60829..4faf484 100644 +--- a/drivers/gpu/drm/r128/r128_drv.h ++++ b/drivers/gpu/drm/r128/r128_drv.h +@@ -90,14 +90,14 @@ typedef struct drm_r128_private { + int is_pci; + unsigned long cce_buffers_offset; + +- atomic_t idle_count; ++ atomic_unchecked_t idle_count; + + int page_flipping; + int current_page; + u32 crtc_offset; + u32 crtc_offset_cntl; + +- atomic_t vbl_received; ++ atomic_unchecked_t vbl_received; + + u32 color_fmt; + unsigned int front_offset; +diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c +index 69810fb..97bf17a 100644 +--- a/drivers/gpu/drm/r128/r128_irq.c ++++ b/drivers/gpu/drm/r128/r128_irq.c +@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return atomic_read_unchecked(&dev_priv->vbl_received); + } + + irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) +@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) + /* VBLANK interrupt */ + if (status & R128_CRTC_VBLANK_INT) { + R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); +- atomic_inc(&dev_priv->vbl_received); ++ atomic_inc_unchecked(&dev_priv->vbl_received); + drm_handle_vblank(dev, 0); + return IRQ_HANDLED; + } +diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c +index af2665c..51922d2 100644 +--- a/drivers/gpu/drm/r128/r128_state.c ++++ b/drivers/gpu/drm/r128/r128_state.c +@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv, + + static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv) + { +- if (atomic_read(&dev_priv->idle_count) == 0) { ++ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) { + r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); + } else { +- atomic_set(&dev_priv->idle_count, 0); ++ atomic_set_unchecked(&dev_priv->idle_count, 0); + } + } + +diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c +index dd72b91..8644b3c 100644 +--- a/drivers/gpu/drm/radeon/atom.c ++++ b/drivers/gpu/drm/radeon/atom.c +@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) + char name[512]; + int i; + ++ pax_track_stack(); ++ + ctx->card = card; + ctx->bios = bios; + +diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c +index 0d79577..efaa7a5 100644 +--- a/drivers/gpu/drm/radeon/mkregtable.c ++++ b/drivers/gpu/drm/radeon/mkregtable.c +@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename) + regex_t mask_rex; + regmatch_t match[4]; + char buf[1024]; +- size_t end; ++ long end; + int len; + int done = 0; + int r; + unsigned o; + struct offset *offset; + char last_reg_s[10]; +- int last_reg; ++ unsigned long last_reg; + + if (regcomp + (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h +index 6735213..38c2c67 100644 +--- a/drivers/gpu/drm/radeon/radeon.h ++++ b/drivers/gpu/drm/radeon/radeon.h +@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev); + */ + struct radeon_fence_driver { + uint32_t scratch_reg; +- atomic_t seq; ++ atomic_unchecked_t seq; + 
uint32_t last_seq; + unsigned long count_timeout; + wait_queue_head_t queue; +@@ -640,7 +640,7 @@ struct radeon_asic { + uint32_t offset, uint32_t obj_size); + int (*clear_surface_reg)(struct radeon_device *rdev, int reg); + void (*bandwidth_update)(struct radeon_device *rdev); +-}; ++} __no_const; + + /* + * Asic structures +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 4e928b9..d8b6008 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + bool linkb; + struct radeon_i2c_bus_rec ddc_bus; + ++ pax_track_stack(); ++ + atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); + + if (data_offset == 0) +@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev, + } + } + +-struct bios_connector { ++static struct bios_connector { + bool valid; + uint16_t line_mux; + uint16_t devices; + int connector_type; + struct radeon_i2c_bus_rec ddc_bus; +-}; ++} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; + + bool radeon_get_atom_connector_info_from_supported_devices_table(struct + drm_device +@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct + uint8_t dac; + union atom_supported_devices *supported_devices; + int i, j; +- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; + + atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); + +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index 083a181..ccccae0 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll, + + if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { + error = freq - current_freq; +- error = error < 0 ? 0xffffffff : error; ++ error = (int32_t)error < 0 ? 
0xffffffff : error; + } else + error = abs(current_freq - freq); + vco_diff = abs(vco - best_vco); +diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h +index 76e4070..193fa7f 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.h ++++ b/drivers/gpu/drm/radeon/radeon_drv.h +@@ -253,7 +253,7 @@ typedef struct drm_radeon_private { + + /* SW interrupt */ + wait_queue_head_t swi_queue; +- atomic_t swi_emitted; ++ atomic_unchecked_t swi_emitted; + int vblank_crtc; + uint32_t irq_enable_reg; + uint32_t r500_disp_irq_reg; +diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c +index 3beb26d..6ce9c4a 100644 +--- a/drivers/gpu/drm/radeon/radeon_fence.c ++++ b/drivers/gpu/drm/radeon/radeon_fence.c +@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) + write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); + return 0; + } +- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); ++ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq); + if (!rdev->cp.ready) { + /* FIXME: cp is not running assume everythings is done right + * away +@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev) + return r; + } + WREG32(rdev->fence_drv.scratch_reg, 0); +- atomic_set(&rdev->fence_drv.seq, 0); ++ atomic_set_unchecked(&rdev->fence_drv.seq, 0); + INIT_LIST_HEAD(&rdev->fence_drv.created); + INIT_LIST_HEAD(&rdev->fence_drv.emited); + INIT_LIST_HEAD(&rdev->fence_drv.signaled); +diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c +index a1bf11d..4a123c0 100644 +--- a/drivers/gpu/drm/radeon/radeon_ioc32.c ++++ b/drivers/gpu/drm/radeon/radeon_ioc32.c +@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.param, &request->param) +- || __put_user((void __user *)(unsigned long)req32.value, ++ || __put_user((unsigned long)req32.value, + &request->value)) + return -EFAULT; + +diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c +index b79ecc4..8dab92d 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq.c ++++ b/drivers/gpu/drm/radeon/radeon_irq.c +@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev) + unsigned int ret; + RING_LOCALS; + +- atomic_inc(&dev_priv->swi_emitted); +- ret = atomic_read(&dev_priv->swi_emitted); ++ atomic_inc_unchecked(&dev_priv->swi_emitted); ++ ret = atomic_read_unchecked(&dev_priv->swi_emitted); + + BEGIN_RING(4); + OUT_RING_REG(RADEON_LAST_SWI_REG, ret); +@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *) dev->dev_private; + +- atomic_set(&dev_priv->swi_emitted, 0); ++ atomic_set_unchecked(&dev_priv->swi_emitted, 0); + DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); + + dev->max_vblank_count = 0x001fffff; +diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c +index 4747910..48ca4b3 100644 +--- a/drivers/gpu/drm/radeon/radeon_state.c ++++ b/drivers/gpu/drm/radeon/radeon_state.c +@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil + { + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_getparam_t *param = data; +- int value; ++ int value = 0; + + DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); + +diff --git 
a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 1381e06..0e53b17 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev) + DRM_INFO("radeon: ttm finalized\n"); + } + +-static struct vm_operations_struct radeon_ttm_vm_ops; +-static const struct vm_operations_struct *ttm_vm_ops = NULL; +- +-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +-{ +- struct ttm_buffer_object *bo; +- int r; +- +- bo = (struct ttm_buffer_object *)vma->vm_private_data; +- if (bo == NULL) { +- return VM_FAULT_NOPAGE; +- } +- r = ttm_vm_ops->fault(vma, vmf); +- return r; +-} +- + int radeon_mmap(struct file *filp, struct vm_area_struct *vma) + { + struct drm_file *file_priv; + struct radeon_device *rdev; +- int r; + + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { + return drm_mmap(filp, vma); +@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) + + file_priv = (struct drm_file *)filp->private_data; + rdev = file_priv->minor->dev->dev_private; +- if (rdev == NULL) { ++ if (!rdev) + return -EINVAL; +- } +- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); +- if (unlikely(r != 0)) { +- return r; +- } +- if (unlikely(ttm_vm_ops == NULL)) { +- ttm_vm_ops = vma->vm_ops; +- radeon_ttm_vm_ops = *ttm_vm_ops; +- radeon_ttm_vm_ops.fault = &radeon_ttm_fault; +- } +- vma->vm_ops = &radeon_ttm_vm_ops; +- return 0; ++ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev); + } + + +diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c +index b12ff76..0bd0c6e 100644 +--- a/drivers/gpu/drm/radeon/rs690.c ++++ b/drivers/gpu/drm/radeon/rs690.c +@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && + rdev->pm.sideport_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; +- read_delay_latency.full = rfixed_const(370 * 800 * 1000); ++ read_delay_latency.full = rfixed_const(800 * 1000); + read_delay_latency.full = rfixed_div(read_delay_latency, + rdev->pm.igp_sideport_mclk); ++ a.full = rfixed_const(370); ++ read_delay_latency.full = rfixed_mul(read_delay_latency, a); + } else { + if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && + rdev->pm.k8_bandwidth.full) +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c +index 0ed436e..e6e7ce3 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo.c ++++ b/drivers/gpu/drm/ttm/ttm_bo.c +@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = { + NULL + }; + +-static struct sysfs_ops ttm_bo_global_ops = { ++static const struct sysfs_ops ttm_bo_global_ops = { + .show = &ttm_bo_global_show + }; + +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c +index 1c040d0..f9e4af8 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c +@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + { + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; +- struct ttm_bo_device *bdev = bo->bdev; ++ struct ttm_bo_device *bdev; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; +@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + unsigned long address = (unsigned long)vmf->virtual_address; + int retval = VM_FAULT_NOPAGE; + ++ if (!bo) ++ return 
VM_FAULT_NOPAGE; ++ bdev = bo->bdev; ++ + /* + * Work around locking order reversal in fault / nopfn + * between mmap_sem and bo_reserve: Perform a trylock operation +diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c +index b170071..28ae90e 100644 +--- a/drivers/gpu/drm/ttm/ttm_global.c ++++ b/drivers/gpu/drm/ttm/ttm_global.c +@@ -36,7 +36,7 @@ + struct ttm_global_item { + struct mutex mutex; + void *object; +- int refcount; ++ atomic_t refcount; + }; + + static struct ttm_global_item glob[TTM_GLOBAL_NUM]; +@@ -49,7 +49,7 @@ void ttm_global_init(void) + struct ttm_global_item *item = &glob[i]; + mutex_init(&item->mutex); + item->object = NULL; +- item->refcount = 0; ++ atomic_set(&item->refcount, 0); + } + } + +@@ -59,7 +59,7 @@ void ttm_global_release(void) + for (i = 0; i < TTM_GLOBAL_NUM; ++i) { + struct ttm_global_item *item = &glob[i]; + BUG_ON(item->object != NULL); +- BUG_ON(item->refcount != 0); ++ BUG_ON(atomic_read(&item->refcount) != 0); + } + } + +@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) + void *object; + + mutex_lock(&item->mutex); +- if (item->refcount == 0) { ++ if (atomic_read(&item->refcount) == 0) { + item->object = kzalloc(ref->size, GFP_KERNEL); + if (unlikely(item->object == NULL)) { + ret = -ENOMEM; +@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) + goto out_err; + + } +- ++item->refcount; ++ atomic_inc(&item->refcount); + ref->object = item->object; + object = item->object; + mutex_unlock(&item->mutex); +@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref) + struct ttm_global_item *item = &glob[ref->global_type]; + + mutex_lock(&item->mutex); +- BUG_ON(item->refcount == 0); ++ BUG_ON(atomic_read(&item->refcount) == 0); + BUG_ON(ref->object != item->object); +- if (--item->refcount == 0) { ++ if (atomic_dec_and_test(&item->refcount)) { + ref->release(ref); + item->object = NULL; + } +diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c +index 072c281..d8ef483 100644 +--- a/drivers/gpu/drm/ttm/ttm_memory.c ++++ b/drivers/gpu/drm/ttm/ttm_memory.c +@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = { + NULL + }; + +-static struct sysfs_ops ttm_mem_zone_ops = { ++static const struct sysfs_ops ttm_mem_zone_ops = { + .show = &ttm_mem_zone_show, + .store = &ttm_mem_zone_store + }; +diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h +index cafcb84..b8e66cc 100644 +--- a/drivers/gpu/drm/via/via_drv.h ++++ b/drivers/gpu/drm/via/via_drv.h +@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer { + typedef uint32_t maskarray_t[5]; + + typedef struct drm_via_irq { +- atomic_t irq_received; ++ atomic_unchecked_t irq_received; + uint32_t pending_mask; + uint32_t enable_mask; + wait_queue_head_t irq_queue; +@@ -75,7 +75,7 @@ typedef struct drm_via_private { + struct timeval last_vblank; + int last_vblank_valid; + unsigned usec_per_vblank; +- atomic_t vbl_received; ++ atomic_unchecked_t vbl_received; + drm_via_state_t hc_state; + char pci_buf[VIA_PCI_BUF_SIZE]; + const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; +diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c +index 5935b88..127a8a6 100644 +--- a/drivers/gpu/drm/via/via_irq.c ++++ b/drivers/gpu/drm/via/via_irq.c +@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return 
atomic_read_unchecked(&dev_priv->vbl_received); + } + + irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) +@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) + + status = VIA_READ(VIA_REG_INTERRUPT); + if (status & VIA_IRQ_VBLANK_PENDING) { +- atomic_inc(&dev_priv->vbl_received); +- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { ++ atomic_inc_unchecked(&dev_priv->vbl_received); ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) { + do_gettimeofday(&cur_vblank); + if (dev_priv->last_vblank_valid) { + dev_priv->usec_per_vblank = +@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) + dev_priv->last_vblank = cur_vblank; + dev_priv->last_vblank_valid = 1; + } +- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) { + DRM_DEBUG("US per vblank is: %u\n", + dev_priv->usec_per_vblank); + } +@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) + + for (i = 0; i < dev_priv->num_irqs; ++i) { + if (status & cur_irq->pending_mask) { +- atomic_inc(&cur_irq->irq_received); ++ atomic_inc_unchecked(&cur_irq->irq_received); + DRM_WAKEUP(&cur_irq->irq_queue); + handled = 1; + if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { +@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, + ((VIA_READ(masks[irq][2]) & masks[irq][3]) == + masks[irq][4])); +- cur_irq_sequence = atomic_read(&cur_irq->irq_received); ++ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received); + } else { + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, + (((cur_irq_sequence = +- atomic_read(&cur_irq->irq_received)) - ++ atomic_read_unchecked(&cur_irq->irq_received)) - + *sequence) <= (1 << 23))); + } + *sequence = cur_irq_sequence; +@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev) + } + + for (i = 0; i < dev_priv->num_irqs; ++i) { +- atomic_set(&cur_irq->irq_received, 0); ++ atomic_set_unchecked(&cur_irq->irq_received, 0); + cur_irq->enable_mask = dev_priv->irq_masks[i][0]; + cur_irq->pending_mask = dev_priv->irq_masks[i][1]; + DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); +@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) + switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { + case VIA_IRQ_RELATIVE: + irqwait->request.sequence += +- atomic_read(&cur_irq->irq_received); ++ atomic_read_unchecked(&cur_irq->irq_received); + irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; + case VIA_IRQ_ABSOLUTE: + break; +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c +index aa8688d..6a0140c 100644 +--- a/drivers/gpu/vga/vgaarb.c ++++ b/drivers/gpu/vga/vgaarb.c +@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + uc = &priv->cards[i]; + } + +- if (!uc) +- return -EINVAL; ++ if (!uc) { ++ ret_val = -EINVAL; ++ goto done; ++ } + +- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) +- return -EINVAL; ++ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { ++ ret_val = -EINVAL; ++ goto done; ++ } + +- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) +- return -EINVAL; ++ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { ++ ret_val = -EINVAL; ++ goto done; ++ } + + vga_put(pdev, io_state); + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 11f8069..4783396 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1752,7 +1752,7 
+1752,7 @@ static bool hid_ignore(struct hid_device *hdev) + + int hid_add_device(struct hid_device *hdev) + { +- static atomic_t id = ATOMIC_INIT(0); ++ static atomic_unchecked_t id = ATOMIC_INIT(0); + int ret; + + if (WARN_ON(hdev->status & HID_STAT_ADDED)) +@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev) + /* XXX hack, any other cleaner solution after the driver core + * is converted to allow more than 20 bytes as the device name? */ + dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, +- hdev->vendor, hdev->product, atomic_inc_return(&id)); ++ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id)); + + ret = device_add(&hdev->dev); + if (!ret) +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c +index 8b6ee24..70f657d 100644 +--- a/drivers/hid/usbhid/hiddev.c ++++ b/drivers/hid/usbhid/hiddev.c +@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + return put_user(HID_VERSION, (int __user *)arg); + + case HIDIOCAPPLICATION: +- if (arg < 0 || arg >= hid->maxapplication) ++ if (arg >= hid->maxapplication) + return -EINVAL; + + for (i = 0; i < hid->maxcollection; i++) +diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c +index 5d5ed69..f40533e 100644 +--- a/drivers/hwmon/lis3lv02d.c ++++ b/drivers/hwmon/lis3lv02d.c +@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) + * the lid is closed. This leads to interrupts as soon as a little move + * is done. + */ +- atomic_inc(&lis3_dev.count); ++ atomic_inc_unchecked(&lis3_dev.count); + + wake_up_interruptible(&lis3_dev.misc_wait); + kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); +@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file) + if (test_and_set_bit(0, &lis3_dev.misc_opened)) + return -EBUSY; /* already open */ + +- atomic_set(&lis3_dev.count, 0); ++ atomic_set_unchecked(&lis3_dev.count, 0); + + /* + * The sensor can generate interrupts for free-fall and direction +@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, + add_wait_queue(&lis3_dev.misc_wait, &wait); + while (true) { + set_current_state(TASK_INTERRUPTIBLE); +- data = atomic_xchg(&lis3_dev.count, 0); ++ data = atomic_xchg_unchecked(&lis3_dev.count, 0); + if (data) + break; + +@@ -244,7 +244,7 @@ out: + static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait) + { + poll_wait(file, &lis3_dev.misc_wait, wait); +- if (atomic_read(&lis3_dev.count)) ++ if (atomic_read_unchecked(&lis3_dev.count)) + return POLLIN | POLLRDNORM; + return 0; + } +diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h +index 7cdd76f..fe0efdf 100644 +--- a/drivers/hwmon/lis3lv02d.h ++++ b/drivers/hwmon/lis3lv02d.h +@@ -201,7 +201,7 @@ struct lis3lv02d { + + struct input_polled_dev *idev; /* input device */ + struct platform_device *pdev; /* platform device */ +- atomic_t count; /* interrupt count after last read */ ++ atomic_unchecked_t count; /* interrupt count after last read */ + int xcalib; /* calibrated null value for x */ + int ycalib; /* calibrated null value for y */ + int zcalib; /* calibrated null value for z */ +diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c +index 740785e..5a5c6c6 100644 +--- a/drivers/hwmon/sht15.c ++++ b/drivers/hwmon/sht15.c +@@ -112,7 +112,7 @@ struct sht15_data { + int supply_uV; + int supply_uV_valid; + struct work_struct update_supply_work; +- atomic_t interrupt_handled; ++ atomic_unchecked_t interrupt_handled; +
}; + + /** +@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data, + return ret; + + gpio_direction_input(data->pdata->gpio_data); +- atomic_set(&data->interrupt_handled, 0); ++ atomic_set_unchecked(&data->interrupt_handled, 0); + + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + if (gpio_get_value(data->pdata->gpio_data) == 0) { + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + /* Only relevant if the interrupt hasn't occured. */ +- if (!atomic_read(&data->interrupt_handled)) ++ if (!atomic_read_unchecked(&data->interrupt_handled)) + schedule_work(&data->read_work); + } + ret = wait_event_timeout(data->wait_queue, +@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d) + struct sht15_data *data = d; + /* First disable the interrupt */ + disable_irq_nosync(irq); +- atomic_inc(&data->interrupt_handled); ++ atomic_inc_unchecked(&data->interrupt_handled); + /* Then schedule a reading work struct */ + if (data->flag != SHT15_READING_NOTHING) + schedule_work(&data->read_work); +@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s) + here as could have gone low in meantime so verify + it hasn't! + */ +- atomic_set(&data->interrupt_handled, 0); ++ atomic_set_unchecked(&data->interrupt_handled, 0); + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + /* If still not occured or another handler has been scheduled */ + if (gpio_get_value(data->pdata->gpio_data) +- || atomic_read(&data->interrupt_handled)) ++ || atomic_read_unchecked(&data->interrupt_handled)) + return; + } + /* Read the data back from the device */ +diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c +index 97851c5..cb40626 100644 +--- a/drivers/hwmon/w83791d.c ++++ b/drivers/hwmon/w83791d.c +@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); + static int w83791d_remove(struct i2c_client *client); + +-static int w83791d_read(struct i2c_client *client, u8 register); +-static int w83791d_write(struct i2c_client *client, u8 register, u8 value); ++static int w83791d_read(struct i2c_client *client, u8 reg); ++static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); + static struct w83791d_data *w83791d_update_device(struct device *dev); + + #ifdef DEBUG +diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c +index 378fcb5..5e91fa8 100644 +--- a/drivers/i2c/busses/i2c-amd756-s4882.c ++++ b/drivers/i2c/busses/i2c-amd756-s4882.c +@@ -43,7 +43,7 @@ + extern struct i2c_adapter amd756_smbus; + + static struct i2c_adapter *s4882_adapter; +-static struct i2c_algorithm *s4882_algo; ++static i2c_algorithm_no_const *s4882_algo; + + /* Wrapper access functions for multiplexed SMBus */ + static DEFINE_MUTEX(amd756_lock); +diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c +index 29015eb..af2d8e9 100644 +--- a/drivers/i2c/busses/i2c-nforce2-s4985.c ++++ b/drivers/i2c/busses/i2c-nforce2-s4985.c +@@ -41,7 +41,7 @@ + extern struct i2c_adapter *nforce2_smbus; + + static struct i2c_adapter *s4985_adapter; +-static struct i2c_algorithm *s4985_algo; ++static i2c_algorithm_no_const *s4985_algo; + + /* Wrapper access functions for multiplexed SMBus */ + static DEFINE_MUTEX(nforce2_lock); +diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c +index 878f8ec..12376fc 100644 +--- a/drivers/ide/aec62xx.c ++++ b/drivers/ide/aec62xx.c +@@ -180,7 +180,7 @@ static const struct ide_port_ops 
atp86x_port_ops = { + .cable_detect = atp86x_cable_detect, + }; + +-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { ++static const struct ide_port_info aec62xx_chipsets[] __devinitconst = { + { /* 0: AEC6210 */ + .name = DRV_NAME, + .init_chipset = init_chipset_aec62xx, +diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c +index e59b6de..4b4fc65 100644 +--- a/drivers/ide/alim15x3.c ++++ b/drivers/ide/alim15x3.c +@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info ali15x3_chipset __devinitdata = { ++static const struct ide_port_info ali15x3_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_ali15x3, + .init_hwif = init_hwif_ali15x3, +diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c +index 628cd2e..087a414 100644 +--- a/drivers/ide/amd74xx.c ++++ b/drivers/ide/amd74xx.c +@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = { + .udma_mask = udma, \ + } + +-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = { ++static const struct ide_port_info amd74xx_chipsets[] __devinitconst = { + /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2), + /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4), + /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5), +diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c +index 837322b..837fd71 100644 +--- a/drivers/ide/atiixp.c ++++ b/drivers/ide/atiixp.c +@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = { + .cable_detect = atiixp_cable_detect, + }; + +-static const struct ide_port_info atiixp_pci_info[] __devinitdata = { ++static const struct ide_port_info atiixp_pci_info[] __devinitconst = { + { /* 0: IXP200/300/400/700 */ + .name = DRV_NAME, + .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}}, +diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c +index ca0c46f..d55318a 100644 +--- a/drivers/ide/cmd64x.c ++++ b/drivers/ide/cmd64x.c +@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { ++static const struct ide_port_info cmd64x_chipsets[] __devinitconst = { + { /* 0: CMD643 */ + .name = DRV_NAME, + .init_chipset = init_chipset_cmd64x, +diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c +index 09f98ed..cebc5bc 100644 +--- a/drivers/ide/cs5520.c ++++ b/drivers/ide/cs5520.c +@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = { + .set_dma_mode = cs5520_set_dma_mode, + }; + +-static const struct ide_port_info cyrix_chipset __devinitdata = { ++static const struct ide_port_info cyrix_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } }, + .port_ops = &cs5520_port_ops, +diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c +index 40bf05e..7d58ca0 100644 +--- a/drivers/ide/cs5530.c ++++ b/drivers/ide/cs5530.c +@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = { + .udma_filter = cs5530_udma_filter, + }; + +-static const struct ide_port_info cs5530_chipset __devinitdata = { ++static const struct ide_port_info cs5530_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_cs5530, + .init_hwif = init_hwif_cs5530, +diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c +index 983d957..53e6172 100644 +--- a/drivers/ide/cs5535.c ++++ b/drivers/ide/cs5535.c 
+@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = { + .cable_detect = cs5535_cable_detect, + }; + +-static const struct ide_port_info cs5535_chipset __devinitdata = { ++static const struct ide_port_info cs5535_chipset __devinitconst = { + .name = DRV_NAME, + .port_ops = &cs5535_port_ops, + .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE, +diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c +index 74fc540..8e933d8 100644 +--- a/drivers/ide/cy82c693.c ++++ b/drivers/ide/cy82c693.c +@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = { + .set_dma_mode = cy82c693_set_dma_mode, + }; + +-static const struct ide_port_info cy82c693_chipset __devinitdata = { ++static const struct ide_port_info cy82c693_chipset __devinitconst = { + .name = DRV_NAME, + .init_iops = init_iops_cy82c693, + .port_ops = &cy82c693_port_ops, +diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c +index 7ce68ef..e78197d 100644 +--- a/drivers/ide/hpt366.c ++++ b/drivers/ide/hpt366.c +@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = { + } + }; + +-static const struct hpt_info hpt36x __devinitdata = { ++static const struct hpt_info hpt36x __devinitconst = { + .chip_name = "HPT36x", + .chip_type = HPT36x, + .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, +@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = { + .timings = &hpt36x_timings + }; + +-static const struct hpt_info hpt370 __devinitdata = { ++static const struct hpt_info hpt370 __devinitconst = { + .chip_name = "HPT370", + .chip_type = HPT370, + .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, +@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt370a __devinitdata = { ++static const struct hpt_info hpt370a __devinitconst = { + .chip_name = "HPT370A", + .chip_type = HPT370A, + .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, +@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt374 __devinitdata = { ++static const struct hpt_info hpt374 __devinitconst = { + .chip_name = "HPT374", + .chip_type = HPT374, + .udma_mask = ATA_UDMA5, +@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt372 __devinitdata = { ++static const struct hpt_info hpt372 __devinitconst = { + .chip_name = "HPT372", + .chip_type = HPT372, + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt372a __devinitdata = { ++static const struct hpt_info hpt372a __devinitconst = { + .chip_name = "HPT372A", + .chip_type = HPT372A, + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt302 __devinitdata = { ++static const struct hpt_info hpt302 __devinitconst = { + .chip_name = "HPT302", + .chip_type = HPT302, + .udma_mask = HPT302_ALLOW_ATA133_6 ? 
ATA_UDMA6 : ATA_UDMA5, +@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt371 __devinitdata = { ++static const struct hpt_info hpt371 __devinitconst = { + .chip_name = "HPT371", + .chip_type = HPT371, + .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt372n __devinitdata = { ++static const struct hpt_info hpt372n __devinitconst = { + .chip_name = "HPT372N", + .chip_type = HPT372N, + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt302n __devinitdata = { ++static const struct hpt_info hpt302n __devinitconst = { + .chip_name = "HPT302N", + .chip_type = HPT302N, + .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt371n __devinitdata = { ++static const struct hpt_info hpt371n __devinitconst = { + .chip_name = "HPT371N", + .chip_type = HPT371N, + .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info hpt366_chipsets[] __devinitdata = { ++static const struct ide_port_info hpt366_chipsets[] __devinitconst = { + { /* 0: HPT36x */ + .name = DRV_NAME, + .init_chipset = init_chipset_hpt366, +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c +index 2de76cc..74186a1 100644 +--- a/drivers/ide/ide-cd.c ++++ b/drivers/ide/ide-cd.c +@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) + alignment = queue_dma_alignment(q) | q->dma_pad_mask; + if ((unsigned long)buf & alignment + || blk_rq_bytes(rq) & q->dma_pad_mask +- || object_is_on_stack(buf)) ++ || object_starts_on_stack(buf)) + drive->dma = 0; + } + } +diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c +index fefbdfc..62ff465 100644 +--- a/drivers/ide/ide-floppy.c ++++ b/drivers/ide/ide-floppy.c +@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) + u8 pc_buf[256], header_len, desc_cnt; + int i, rc = 1, blocks, length; + ++ pax_track_stack(); ++ + ide_debug_log(IDE_DBG_FUNC, "enter"); + + drive->bios_cyl = 0; +diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c +index 39d4e01..11538ce 100644 +--- a/drivers/ide/ide-pci-generic.c ++++ b/drivers/ide/ide-pci-generic.c +@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = { + .udma_mask = ATA_UDMA6, \ + } + +-static const struct ide_port_info generic_chipsets[] __devinitdata = { ++static const struct ide_port_info generic_chipsets[] __devinitconst = { + /* 0: Unknown */ + DECLARE_GENERIC_PCI_DEV(0), + +diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c +index 0d266a5..aaca790 100644 +--- a/drivers/ide/it8172.c ++++ b/drivers/ide/it8172.c +@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = { + .set_dma_mode = it8172_set_dma_mode, + }; + +-static const struct ide_port_info it8172_port_info __devinitdata = { ++static const struct ide_port_info it8172_port_info __devinitconst = { + .name = DRV_NAME, + .port_ops = &it8172_port_ops, + .enablebits = { 
{0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} }, +diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c +index 4797616..4be488a 100644 +--- a/drivers/ide/it8213.c ++++ b/drivers/ide/it8213.c +@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = { + .cable_detect = it8213_cable_detect, + }; + +-static const struct ide_port_info it8213_chipset __devinitdata = { ++static const struct ide_port_info it8213_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { {0x41, 0x80, 0x80} }, + .port_ops = &it8213_port_ops, +diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c +index 51aa745..146ee60 100644 +--- a/drivers/ide/it821x.c ++++ b/drivers/ide/it821x.c +@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = { + .cable_detect = it821x_cable_detect, + }; + +-static const struct ide_port_info it821x_chipset __devinitdata = { ++static const struct ide_port_info it821x_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_it821x, + .init_hwif = init_hwif_it821x, +diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c +index bf2be64..9270098 100644 +--- a/drivers/ide/jmicron.c ++++ b/drivers/ide/jmicron.c +@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = { + .cable_detect = jmicron_cable_detect, + }; + +-static const struct ide_port_info jmicron_chipset __devinitdata = { ++static const struct ide_port_info jmicron_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } }, + .port_ops = &jmicron_port_ops, +diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c +index 95327a2..73f78d8 100644 +--- a/drivers/ide/ns87415.c ++++ b/drivers/ide/ns87415.c +@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = { + .dma_sff_read_status = superio_dma_sff_read_status, + }; + +-static const struct ide_port_info ns87415_chipset __devinitdata = { ++static const struct ide_port_info ns87415_chipset __devinitconst = { + .name = DRV_NAME, + .init_hwif = init_hwif_ns87415, + .tp_ops = &ns87415_tp_ops, +diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c +index f1d70d6..e1de05b 100644 +--- a/drivers/ide/opti621.c ++++ b/drivers/ide/opti621.c +@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = { + .set_pio_mode = opti621_set_pio_mode, + }; + +-static const struct ide_port_info opti621_chipset __devinitdata = { ++static const struct ide_port_info opti621_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} }, + .port_ops = &opti621_port_ops, +diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c +index 65ba823..7311f4d 100644 +--- a/drivers/ide/pdc202xx_new.c ++++ b/drivers/ide/pdc202xx_new.c +@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = { + .udma_mask = udma, \ + } + +-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = { ++static const struct ide_port_info pdcnew_chipsets[] __devinitconst = { + /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5), + /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6), + }; +diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c +index cb812f3..af816ef 100644 +--- a/drivers/ide/pdc202xx_old.c ++++ b/drivers/ide/pdc202xx_old.c +@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = { + .max_sectors = sectors, \ + } + +-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { ++static const struct ide_port_info pdc202xx_chipsets[] 
__devinitconst = { + { /* 0: PDC20246 */ + .name = DRV_NAME, + .init_chipset = init_chipset_pdc202xx, +diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c +index bf14f39..15c4b98 100644 +--- a/drivers/ide/piix.c ++++ b/drivers/ide/piix.c +@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = { + .udma_mask = udma, \ + } + +-static const struct ide_port_info piix_pci_info[] __devinitdata = { ++static const struct ide_port_info piix_pci_info[] __devinitconst = { + /* 0: MPIIX */ + { /* + * MPIIX actually has only a single IDE channel mapped to +diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c +index a6414a8..c04173e 100644 +--- a/drivers/ide/rz1000.c ++++ b/drivers/ide/rz1000.c +@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev) + } + } + +-static const struct ide_port_info rz1000_chipset __devinitdata = { ++static const struct ide_port_info rz1000_chipset __devinitconst = { + .name = DRV_NAME, + .host_flags = IDE_HFLAG_NO_DMA, + }; +diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c +index d467478..9203942 100644 +--- a/drivers/ide/sc1200.c ++++ b/drivers/ide/sc1200.c +@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info sc1200_chipset __devinitdata = { ++static const struct ide_port_info sc1200_chipset __devinitconst = { + .name = DRV_NAME, + .port_ops = &sc1200_port_ops, + .dma_ops = &sc1200_dma_ops, +diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c +index 1104bb3..59c5194 100644 +--- a/drivers/ide/scc_pata.c ++++ b/drivers/ide/scc_pata.c +@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = { + .dma_sff_read_status = scc_dma_sff_read_status, + }; + +-static const struct ide_port_info scc_chipset __devinitdata = { ++static const struct ide_port_info scc_chipset __devinitconst = { + .name = "sccIDE", + .init_iops = init_iops_scc, + .init_dma = scc_init_dma, +diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c +index b6554ef..6cc2cc3 100644 +--- a/drivers/ide/serverworks.c ++++ b/drivers/ide/serverworks.c +@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = { + .cable_detect = svwks_cable_detect, + }; + +-static const struct ide_port_info serverworks_chipsets[] __devinitdata = { ++static const struct ide_port_info serverworks_chipsets[] __devinitconst = { + { /* 0: OSB4 */ + .name = DRV_NAME, + .init_chipset = init_chipset_svwks, +diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c +index ab3db61..afed580 100644 +--- a/drivers/ide/setup-pci.c ++++ b/drivers/ide/setup-pci.c +@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, + int ret, i, n_ports = dev2 ? 
4 : 2; + struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + ++ pax_track_stack(); ++ + for (i = 0; i < n_ports / 2; i++) { + ret = ide_setup_pci_controller(pdev[i], d, !i); + if (ret < 0) +diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c +index d95df52..0b03a39 100644 +--- a/drivers/ide/siimage.c ++++ b/drivers/ide/siimage.c +@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = { + .udma_mask = ATA_UDMA6, \ + } + +-static const struct ide_port_info siimage_chipsets[] __devinitdata = { ++static const struct ide_port_info siimage_chipsets[] __devinitconst = { + /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops), + /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops) + }; +diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c +index 3b88eba..ca8699d 100644 +--- a/drivers/ide/sis5513.c ++++ b/drivers/ide/sis5513.c +@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = { + .cable_detect = sis_cable_detect, + }; + +-static const struct ide_port_info sis5513_chipset __devinitdata = { ++static const struct ide_port_info sis5513_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_sis5513, + .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} }, +diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c +index d698da4..fca42a4 100644 +--- a/drivers/ide/sl82c105.c ++++ b/drivers/ide/sl82c105.c +@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info sl82c105_chipset __devinitdata = { ++static const struct ide_port_info sl82c105_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_sl82c105, + .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}}, +diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c +index 1ccfb40..83d5779 100644 +--- a/drivers/ide/slc90e66.c ++++ b/drivers/ide/slc90e66.c +@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = { + .cable_detect = slc90e66_cable_detect, + }; + +-static const struct ide_port_info slc90e66_chipset __devinitdata = { ++static const struct ide_port_info slc90e66_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} }, + .port_ops = &slc90e66_port_ops, +diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c +index 05a93d6..5f9e325 100644 +--- a/drivers/ide/tc86c001.c ++++ b/drivers/ide/tc86c001.c +@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info tc86c001_chipset __devinitdata = { ++static const struct ide_port_info tc86c001_chipset __devinitconst = { + .name = DRV_NAME, + .init_hwif = init_hwif_tc86c001, + .port_ops = &tc86c001_port_ops, +diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c +index 8773c3b..7907d6c 100644 +--- a/drivers/ide/triflex.c ++++ b/drivers/ide/triflex.c +@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = { + .set_dma_mode = triflex_set_mode, + }; + +-static const struct ide_port_info triflex_device __devinitdata = { ++static const struct ide_port_info triflex_device __devinitconst = { + .name = DRV_NAME, + .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}}, + .port_ops = &triflex_port_ops, +diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c +index 4b42ca0..e494a98 100644 +--- a/drivers/ide/trm290.c ++++ b/drivers/ide/trm290.c +@@ -324,7 +324,7 @@ static struct ide_dma_ops 
trm290_dma_ops = { + .dma_check = trm290_dma_check, + }; + +-static const struct ide_port_info trm290_chipset __devinitdata = { ++static const struct ide_port_info trm290_chipset __devinitconst = { + .name = DRV_NAME, + .init_hwif = init_hwif_trm290, + .tp_ops = &trm290_tp_ops, +diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c +index 028de26..520d5d5 100644 +--- a/drivers/ide/via82cxxx.c ++++ b/drivers/ide/via82cxxx.c +@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = { + .cable_detect = via82cxxx_cable_detect, + }; + +-static const struct ide_port_info via82cxxx_chipset __devinitdata = { ++static const struct ide_port_info via82cxxx_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_via82cxxx, + .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, +diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c +index 2cd00b5..14de699 100644 +--- a/drivers/ieee1394/dv1394.c ++++ b/drivers/ieee1394/dv1394.c +@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame) + based upon DIF section and sequence + */ + +-static void inline ++static inline void + frame_put_packet (struct frame *f, struct packet *p) + { + int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */ +diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c +index e947d8f..6a966b9 100644 +--- a/drivers/ieee1394/hosts.c ++++ b/drivers/ieee1394/hosts.c +@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, + } + + static struct hpsb_host_driver dummy_driver = { ++ .name = "dummy", + .transmit_packet = dummy_transmit_packet, + .devctl = dummy_devctl, + .isoctl = dummy_isoctl +diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c +index ddaab6e..8d37435 100644 +--- a/drivers/ieee1394/init_ohci1394_dma.c ++++ b/drivers/ieee1394/init_ohci1394_dma.c +@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void) + for (func = 0; func < 8; func++) { + u32 class = read_pci_config(num,slot,func, + PCI_CLASS_REVISION); +- if ((class == 0xffffffff)) ++ if (class == 0xffffffff) + continue; /* No device at this func */ + + if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI) +diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c +index 65c1429..5d8c11f 100644 +--- a/drivers/ieee1394/ohci1394.c ++++ b/drivers/ieee1394/ohci1394.c +@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args) + printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) + + /* Module Parameters */ +-static int phys_dma = 1; ++static int phys_dma; + module_param(phys_dma, int, 0444); +-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1)."); ++MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0)."); + + static void dma_trm_tasklet(unsigned long data); + static void dma_trm_reset(struct dma_trm_ctx *d); +diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c +index f199896..78c9fc8 100644 +--- a/drivers/ieee1394/sbp2.c ++++ b/drivers/ieee1394/sbp2.c +@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver"); + MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME); + MODULE_LICENSE("GPL"); + +-static int sbp2_module_init(void) ++static int __init sbp2_module_init(void) + { + int ret; + +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index a5dea6b..0cefe8f 100644 +--- a/drivers/infiniband/core/cm.c ++++ 
b/drivers/infiniband/core/cm.c +@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS] + + struct cm_counter_group { + struct kobject obj; +- atomic_long_t counter[CM_ATTR_COUNT]; ++ atomic_long_unchecked_t counter[CM_ATTR_COUNT]; + }; + + struct cm_counter_attribute { +@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work, + struct ib_mad_send_buf *msg = NULL; + int ret; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_REQ_COUNTER]); + + /* Quick state check to discard duplicate REQs. */ +@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work) + if (!cm_id_priv) + return; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_REP_COUNTER]); + ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); + if (ret) +@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work) + if (cm_id_priv->id.state != IB_CM_REP_SENT && + cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { + spin_unlock_irq(&cm_id_priv->lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_RTU_COUNTER]); + goto out; + } +@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work) + cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, + dreq_msg->local_comm_id); + if (!cm_id_priv) { +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + cm_issue_drep(work->port, work->mad_recv_wc); + return -EINVAL; +@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work) + case IB_CM_MRA_REP_RCVD: + break; + case IB_CM_TIMEWAIT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work) + cm_free_msg(msg); + goto deref; + case IB_CM_DREQ_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + goto unlock; + default: +@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work) + ib_modify_mad(cm_id_priv->av.port->mad_agent, + cm_id_priv->msg, timeout)) { + if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) +- atomic_long_inc(&work->port-> ++ atomic_long_inc_unchecked(&work->port-> + counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + goto out; +@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work) + break; + case IB_CM_MRA_REQ_RCVD: + case IB_CM_MRA_REP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + /* fall through */ + default: +@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work) + case IB_CM_LAP_IDLE: + break; + case IB_CM_MRA_LAP_SENT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
+ counter[CM_LAP_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work) + cm_free_msg(msg); + goto deref; + case IB_CM_LAP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_LAP_COUNTER]); + goto unlock; + default: +@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work) + cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); + if (cur_cm_id_priv) { + spin_unlock_irq(&cm.lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_SIDR_REQ_COUNTER]); + goto out; /* Duplicate message. */ + } +@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, + if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) + msg->retries = 1; + +- atomic_long_add(1 + msg->retries, ++ atomic_long_add_unchecked(1 + msg->retries, + &port->counter_group[CM_XMIT].counter[attr_index]); + if (msg->retries) +- atomic_long_add(msg->retries, ++ atomic_long_add_unchecked(msg->retries, + &port->counter_group[CM_XMIT_RETRIES]. + counter[attr_index]); + +@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, + } + + attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); +- atomic_long_inc(&port->counter_group[CM_RECV]. ++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. + counter[attr_id - CM_ATTR_ID_OFFSET]); + + work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, +@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, + cm_attr = container_of(attr, struct cm_counter_attribute, attr); + + return sprintf(buf, "%ld\n", +- atomic_long_read(&group->counter[cm_attr->index])); ++ atomic_long_read_unchecked(&group->counter[cm_attr->index])); + } + +-static struct sysfs_ops cm_counter_ops = { ++static const struct sysfs_ops cm_counter_ops = { + .show = cm_show_counter + }; + +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 8fd3a6f..61d8075 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, + + req.private_data_len = sizeof(struct cma_hdr) + + conn_param->private_data_len; ++ if (req.private_data_len < conn_param->private_data_len) ++ return -EINVAL; ++ + req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); + if (!req.private_data) + return -ENOMEM; +@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, + memset(&req, 0, sizeof req); + offset = cma_user_data_offset(id_priv->id.ps); + req.private_data_len = offset + conn_param->private_data_len; ++ if (req.private_data_len < conn_param->private_data_len) ++ return -EINVAL; ++ + private_data = kzalloc(req.private_data_len, GFP_ATOMIC); + if (!private_data) + return -ENOMEM; +diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c +index 4507043..14ad522 100644 +--- a/drivers/infiniband/core/fmr_pool.c ++++ b/drivers/infiniband/core/fmr_pool.c +@@ -97,8 +97,8 @@ struct ib_fmr_pool { + + struct task_struct *thread; + +- atomic_t req_ser; +- atomic_t flush_ser; ++ atomic_unchecked_t req_ser; ++ atomic_unchecked_t flush_ser; + + wait_queue_head_t force_wait; + }; +@@ -179,10 +179,10 @@ static int 
ib_fmr_cleanup_thread(void *pool_ptr) + struct ib_fmr_pool *pool = pool_ptr; + + do { +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) { + ib_fmr_batch_release(pool); + +- atomic_inc(&pool->flush_ser); ++ atomic_inc_unchecked(&pool->flush_ser); + wake_up_interruptible(&pool->force_wait); + + if (pool->flush_function) +@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) + } + + set_current_state(TASK_INTERRUPTIBLE); +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 && + !kthread_should_stop()) + schedule(); + __set_current_state(TASK_RUNNING); +@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, + pool->dirty_watermark = params->dirty_watermark; + pool->dirty_len = 0; + spin_lock_init(&pool->pool_lock); +- atomic_set(&pool->req_ser, 0); +- atomic_set(&pool->flush_ser, 0); ++ atomic_set_unchecked(&pool->req_ser, 0); ++ atomic_set_unchecked(&pool->flush_ser, 0); + init_waitqueue_head(&pool->force_wait); + + pool->thread = kthread_run(ib_fmr_cleanup_thread, +@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool) + } + spin_unlock_irq(&pool->pool_lock); + +- serial = atomic_inc_return(&pool->req_ser); ++ serial = atomic_inc_return_unchecked(&pool->req_ser); + wake_up_process(pool->thread); + + if (wait_event_interruptible(pool->force_wait, +- atomic_read(&pool->flush_ser) - serial >= 0)) ++ atomic_read_unchecked(&pool->flush_ser) - serial >= 0)) + return -EINTR; + + return 0; +@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) + } else { + list_add_tail(&fmr->list, &pool->dirty_list); + if (++pool->dirty_len >= pool->dirty_watermark) { +- atomic_inc(&pool->req_ser); ++ atomic_inc_unchecked(&pool->req_ser); + wake_up_process(pool->thread); + } + } +diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c +index 158a214..1558bb7 100644 +--- a/drivers/infiniband/core/sysfs.c ++++ b/drivers/infiniband/core/sysfs.c +@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj, + return port_attr->show(p, port_attr, buf); + } + +-static struct sysfs_ops port_sysfs_ops = { ++static const struct sysfs_ops port_sysfs_ops = { + .show = port_attr_show + }; + +diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c +index 5440da0..1194ecb 100644 +--- a/drivers/infiniband/core/uverbs_marshall.c ++++ b/drivers/infiniband/core/uverbs_marshall.c +@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, + dst->grh.sgid_index = src->grh.sgid_index; + dst->grh.hop_limit = src->grh.hop_limit; + dst->grh.traffic_class = src->grh.traffic_class; ++ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved)); + dst->dlid = src->dlid; + dst->sl = src->sl; + dst->src_path_bits = src->src_path_bits; + dst->static_rate = src->static_rate; + dst->is_global = src->ah_flags & IB_AH_GRH ? 
1 : 0; + dst->port_num = src->port_num; ++ dst->reserved = 0; + } + EXPORT_SYMBOL(ib_copy_ah_attr_to_user); + + void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, + struct ib_qp_attr *src) + { ++ dst->qp_state = src->qp_state; + dst->cur_qp_state = src->cur_qp_state; + dst->path_mtu = src->path_mtu; + dst->path_mig_state = src->path_mig_state; +@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, + dst->rnr_retry = src->rnr_retry; + dst->alt_port_num = src->alt_port_num; + dst->alt_timeout = src->alt_timeout; ++ memset(dst->reserved, 0, sizeof(dst->reserved)); + } + EXPORT_SYMBOL(ib_copy_qp_attr_to_user); + +diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c +index 100da85..62e6b88 100644 +--- a/drivers/infiniband/hw/ipath/ipath_fs.c ++++ b/drivers/infiniband/hw/ipath/ipath_fs.c +@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf, + struct infinipath_counters counters; + struct ipath_devdata *dd; + ++ pax_track_stack(); ++ + dd = file->f_path.dentry->d_inode->i_private; + dd->ipath_f_read_counters(dd, &counters); + +diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c +index cbde0cf..afaf55c 100644 +--- a/drivers/infiniband/hw/nes/nes.c ++++ b/drivers/infiniband/hw/nes/nes.c +@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes"); + LIST_HEAD(nes_adapter_list); + static LIST_HEAD(nes_dev_list); + +-atomic_t qps_destroyed; ++atomic_unchecked_t qps_destroyed; + + static unsigned int ee_flsh_adapter; + static unsigned int sysfs_nonidx_addr; +@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r + struct nes_adapter *nesadapter = nesdev->nesadapter; + u32 qp_id; + +- atomic_inc(&qps_destroyed); ++ atomic_inc_unchecked(&qps_destroyed); + + /* Free the control structures */ + +diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h +index bcc6abc..9c76b2f 100644 +--- a/drivers/infiniband/hw/nes/nes.h ++++ b/drivers/infiniband/hw/nes/nes.h +@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level; + extern unsigned int wqm_quanta; + extern struct list_head nes_adapter_list; + +-extern atomic_t cm_connects; +-extern atomic_t cm_accepts; +-extern atomic_t cm_disconnects; +-extern atomic_t cm_closes; +-extern atomic_t cm_connecteds; +-extern atomic_t cm_connect_reqs; +-extern atomic_t cm_rejects; +-extern atomic_t mod_qp_timouts; +-extern atomic_t qps_created; +-extern atomic_t qps_destroyed; +-extern atomic_t sw_qps_destroyed; ++extern atomic_unchecked_t cm_connects; ++extern atomic_unchecked_t cm_accepts; ++extern atomic_unchecked_t cm_disconnects; ++extern atomic_unchecked_t cm_closes; ++extern atomic_unchecked_t cm_connecteds; ++extern atomic_unchecked_t cm_connect_reqs; ++extern atomic_unchecked_t cm_rejects; ++extern atomic_unchecked_t mod_qp_timouts; ++extern atomic_unchecked_t qps_created; ++extern atomic_unchecked_t qps_destroyed; ++extern atomic_unchecked_t sw_qps_destroyed; + extern u32 mh_detected; + extern u32 mh_pauses_sent; + extern u32 cm_packets_sent; +@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans; + extern u32 cm_listens_created; + extern u32 cm_listens_destroyed; + extern u32 cm_backlog_drops; +-extern atomic_t cm_loopbacks; +-extern atomic_t cm_nodes_created; +-extern atomic_t cm_nodes_destroyed; +-extern atomic_t cm_accel_dropped_pkts; +-extern atomic_t cm_resets_recvd; ++extern atomic_unchecked_t cm_loopbacks; ++extern 
atomic_unchecked_t cm_nodes_created; ++extern atomic_unchecked_t cm_nodes_destroyed; ++extern atomic_unchecked_t cm_accel_dropped_pkts; ++extern atomic_unchecked_t cm_resets_recvd; + + extern u32 int_mod_timer_init; + extern u32 int_mod_cq_depth_256; +diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c +index 73473db..5ed06e8 100644 +--- a/drivers/infiniband/hw/nes/nes_cm.c ++++ b/drivers/infiniband/hw/nes/nes_cm.c +@@ -69,11 +69,11 @@ u32 cm_packets_received; + u32 cm_listens_created; + u32 cm_listens_destroyed; + u32 cm_backlog_drops; +-atomic_t cm_loopbacks; +-atomic_t cm_nodes_created; +-atomic_t cm_nodes_destroyed; +-atomic_t cm_accel_dropped_pkts; +-atomic_t cm_resets_recvd; ++atomic_unchecked_t cm_loopbacks; ++atomic_unchecked_t cm_nodes_created; ++atomic_unchecked_t cm_nodes_destroyed; ++atomic_unchecked_t cm_accel_dropped_pkts; ++atomic_unchecked_t cm_resets_recvd; + + static inline int mini_cm_accelerated(struct nes_cm_core *, + struct nes_cm_node *); +@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = { + + static struct nes_cm_core *g_cm_core; + +-atomic_t cm_connects; +-atomic_t cm_accepts; +-atomic_t cm_disconnects; +-atomic_t cm_closes; +-atomic_t cm_connecteds; +-atomic_t cm_connect_reqs; +-atomic_t cm_rejects; ++atomic_unchecked_t cm_connects; ++atomic_unchecked_t cm_accepts; ++atomic_unchecked_t cm_disconnects; ++atomic_unchecked_t cm_closes; ++atomic_unchecked_t cm_connecteds; ++atomic_unchecked_t cm_connect_reqs; ++atomic_unchecked_t cm_rejects; + + + /** +@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, + cm_node->rem_mac); + + add_hte_node(cm_core, cm_node); +- atomic_inc(&cm_nodes_created); ++ atomic_inc_unchecked(&cm_nodes_created); + + return cm_node; + } +@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, + } + + atomic_dec(&cm_core->node_cnt); +- atomic_inc(&cm_nodes_destroyed); ++ atomic_inc_unchecked(&cm_nodes_destroyed); + nesqp = cm_node->nesqp; + if (nesqp) { + nesqp->cm_node = NULL; +@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, + + static void drop_packet(struct sk_buff *skb) + { +- atomic_inc(&cm_accel_dropped_pkts); ++ atomic_inc_unchecked(&cm_accel_dropped_pkts); + dev_kfree_skb_any(skb); + } + +@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, + + int reset = 0; /* whether to send reset in case of err.. */ + int passive_state; +- atomic_inc(&cm_resets_recvd); ++ atomic_inc_unchecked(&cm_resets_recvd); + nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." 
+ " refcnt=%d\n", cm_node, cm_node->state, + atomic_read(&cm_node->ref_count)); +@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, + rem_ref_cm_node(cm_node->cm_core, cm_node); + return NULL; + } +- atomic_inc(&cm_loopbacks); ++ atomic_inc_unchecked(&cm_loopbacks); + loopbackremotenode->loopbackpartner = cm_node; + loopbackremotenode->tcp_cntxt.rcv_wscale = + NES_CM_DEFAULT_RCV_WND_SCALE; +@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, + add_ref_cm_node(cm_node); + } else if (cm_node->state == NES_CM_STATE_TSA) { + rem_ref_cm_node(cm_core, cm_node); +- atomic_inc(&cm_accel_dropped_pkts); ++ atomic_inc_unchecked(&cm_accel_dropped_pkts); + dev_kfree_skb_any(skb); + break; + } +@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) + + if ((cm_id) && (cm_id->event_handler)) { + if (issue_disconn) { +- atomic_inc(&cm_disconnects); ++ atomic_inc_unchecked(&cm_disconnects); + cm_event.event = IW_CM_EVENT_DISCONNECT; + cm_event.status = disconn_status; + cm_event.local_addr = cm_id->local_addr; +@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) + } + + if (issue_close) { +- atomic_inc(&cm_closes); ++ atomic_inc_unchecked(&cm_closes); + nes_disconnect(nesqp, 1); + + cm_id->provider_data = nesqp; +@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + + nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", + nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); +- atomic_inc(&cm_accepts); ++ atomic_inc_unchecked(&cm_accepts); + + nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", + atomic_read(&nesvnic->netdev->refcnt)); +@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) + + struct nes_cm_core *cm_core; + +- atomic_inc(&cm_rejects); ++ atomic_inc_unchecked(&cm_rejects); + cm_node = (struct nes_cm_node *) cm_id->provider_data; + loopback = cm_node->loopbackpartner; + cm_core = cm_node->cm_core; +@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + ntohl(cm_id->local_addr.sin_addr.s_addr), + ntohs(cm_id->local_addr.sin_port)); + +- atomic_inc(&cm_connects); ++ atomic_inc_unchecked(&cm_connects); + nesqp->active_conn = 1; + + /* cache the cm_id in the qp */ +@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event) + if (nesqp->destroyed) { + return; + } +- atomic_inc(&cm_connecteds); ++ atomic_inc_unchecked(&cm_connecteds); + nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" + " local port 0x%04X. 
jiffies = %lu.\n", + nesqp->hwqp.qp_id, +@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event) + + ret = cm_id->event_handler(cm_id, &cm_event); + cm_id->add_ref(cm_id); +- atomic_inc(&cm_closes); ++ atomic_inc_unchecked(&cm_closes); + cm_event.event = IW_CM_EVENT_CLOSE; + cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.provider_data = cm_id->provider_data; +@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) + return; + cm_id = cm_node->cm_id; + +- atomic_inc(&cm_connect_reqs); ++ atomic_inc_unchecked(&cm_connect_reqs); + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", + cm_node, cm_id, jiffies); + +@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) + return; + cm_id = cm_node->cm_id; + +- atomic_inc(&cm_connect_reqs); ++ atomic_inc_unchecked(&cm_connect_reqs); + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", + cm_node, cm_id, jiffies); + +diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c +index e593af3..870694a 100644 +--- a/drivers/infiniband/hw/nes/nes_nic.c ++++ b/drivers/infiniband/hw/nes/nes_nic.c +@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, + target_stat_values[++index] = mh_detected; + target_stat_values[++index] = mh_pauses_sent; + target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; +- target_stat_values[++index] = atomic_read(&cm_connects); +- target_stat_values[++index] = atomic_read(&cm_accepts); +- target_stat_values[++index] = atomic_read(&cm_disconnects); +- target_stat_values[++index] = atomic_read(&cm_connecteds); +- target_stat_values[++index] = atomic_read(&cm_connect_reqs); +- target_stat_values[++index] = atomic_read(&cm_rejects); +- target_stat_values[++index] = atomic_read(&mod_qp_timouts); +- target_stat_values[++index] = atomic_read(&qps_created); +- target_stat_values[++index] = atomic_read(&sw_qps_destroyed); +- target_stat_values[++index] = atomic_read(&qps_destroyed); +- target_stat_values[++index] = atomic_read(&cm_closes); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connects); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects); ++ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts); ++ target_stat_values[++index] = atomic_read_unchecked(&qps_created); ++ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_closes); + target_stat_values[++index] = cm_packets_sent; + target_stat_values[++index] = cm_packets_bounced; + target_stat_values[++index] = cm_packets_created; +@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, + target_stat_values[++index] = cm_listens_created; + target_stat_values[++index] = cm_listens_destroyed; + target_stat_values[++index] = cm_backlog_drops; +- target_stat_values[++index] = atomic_read(&cm_loopbacks); +- target_stat_values[++index] = atomic_read(&cm_nodes_created); +- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); +- target_stat_values[++index] = 
atomic_read(&cm_accel_dropped_pkts); +- target_stat_values[++index] = atomic_read(&cm_resets_recvd); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd); + target_stat_values[++index] = int_mod_timer_init; + target_stat_values[++index] = int_mod_cq_depth_1; + target_stat_values[++index] = int_mod_cq_depth_4; +diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c +index a680c42..f914deb 100644 +--- a/drivers/infiniband/hw/nes/nes_verbs.c ++++ b/drivers/infiniband/hw/nes/nes_verbs.c +@@ -45,9 +45,9 @@ + + #include <rdma/ib_umem.h> + +-atomic_t mod_qp_timouts; +-atomic_t qps_created; +-atomic_t sw_qps_destroyed; ++atomic_unchecked_t mod_qp_timouts; ++atomic_unchecked_t qps_created; ++atomic_unchecked_t sw_qps_destroyed; + + static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); + +@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, + if (init_attr->create_flags) + return ERR_PTR(-EINVAL); + +- atomic_inc(&qps_created); ++ atomic_inc_unchecked(&qps_created); + switch (init_attr->qp_type) { + case IB_QPT_RC: + if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { +@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) + struct iw_cm_event cm_event; + int ret; + +- atomic_inc(&sw_qps_destroyed); ++ atomic_inc_unchecked(&sw_qps_destroyed); + nesqp->destroyed = 1; + + /* Blow away the connection if it exists. */ +diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c +index ac11be0..3883c04 100644 +--- a/drivers/input/gameport/gameport.c ++++ b/drivers/input/gameport/gameport.c +@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys); + */ + static void gameport_init_port(struct gameport *gameport) + { +- static atomic_t gameport_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0); + + __module_get(THIS_MODULE); + + mutex_init(&gameport->drv_mutex); + device_initialize(&gameport->dev); +- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1); ++ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1); + gameport->dev.bus = &gameport_bus; + gameport->dev.release = gameport_release_port; + if (gameport->parent) +diff --git a/drivers/input/input.c b/drivers/input/input.c +index c82ae82..8cfb9cb 100644 +--- a/drivers/input/input.c ++++ b/drivers/input/input.c +@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability); + */ + int input_register_device(struct input_dev *dev) + { +- static atomic_t input_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t input_no = ATOMIC_INIT(0); + struct input_handler *handler; + const char *path; + int error; +@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev) + dev->setkeycode = input_default_setkeycode; + + dev_set_name(&dev->dev, "input%ld", +- (unsigned long) atomic_inc_return(&input_no) - 1); ++ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1); + + error = device_add(&dev->dev); + if (error) +diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c +index ca13a6b..b032b0c 100644 +--- a/drivers/input/joystick/sidewinder.c ++++ 
b/drivers/input/joystick/sidewinder.c +@@ -30,6 +30,7 @@ + #include <linux/kernel.h> + #include <linux/module.h> + #include <linux/slab.h> ++#include <linux/sched.h> + #include <linux/init.h> + #include <linux/input.h> + #include <linux/gameport.h> +@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw) + unsigned char buf[SW_LENGTH]; + int i; + ++ pax_track_stack(); ++ + i = sw_read_packet(sw->gameport, buf, sw->length, 0); + + if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */ +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 79e3edc..01412b9 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev, + + static int xpad_led_probe(struct usb_xpad *xpad) + { +- static atomic_t led_seq = ATOMIC_INIT(0); ++ static atomic_unchecked_t led_seq = ATOMIC_INIT(0); + long led_no; + struct xpad_led *led; + struct led_classdev *led_cdev; +@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad) + if (!led) + return -ENOMEM; + +- led_no = (long)atomic_inc_return(&led_seq) - 1; ++ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1; + + snprintf(led->name, sizeof(led->name), "xpad%ld", led_no); + led->xpad = xpad; +diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c +index 0236f0d..c7327f1 100644 +--- a/drivers/input/serio/serio.c ++++ b/drivers/input/serio/serio.c +@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev) + */ + static void serio_init_port(struct serio *serio) + { +- static atomic_t serio_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t serio_no = ATOMIC_INIT(0); + + __module_get(THIS_MODULE); + +@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio) + mutex_init(&serio->drv_mutex); + device_initialize(&serio->dev); + dev_set_name(&serio->dev, "serio%ld", +- (long)atomic_inc_return(&serio_no) - 1); ++ (long)atomic_inc_return_unchecked(&serio_no) - 1); + serio->dev.bus = &serio_bus; + serio->dev.release = serio_release_port; + if (serio->parent) { +diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c +index 33dcd8d..2783d25 100644 +--- a/drivers/isdn/gigaset/common.c ++++ b/drivers/isdn/gigaset/common.c +@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, + cs->commands_pending = 0; + cs->cur_at_seq = 0; + cs->gotfwver = -1; +- cs->open_count = 0; ++ local_set(&cs->open_count, 0); + cs->dev = NULL; + cs->tty = NULL; + cs->tty_dev = NULL; +diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h +index a2f6125..6a70677 100644 +--- a/drivers/isdn/gigaset/gigaset.h ++++ b/drivers/isdn/gigaset/gigaset.h +@@ -34,6 +34,7 @@ + #include <linux/tty_driver.h> + #include <linux/list.h> + #include <asm/atomic.h> ++#include <asm/local.h> + + #define GIG_VERSION {0,5,0,0} + #define GIG_COMPAT {0,4,0,0} +@@ -446,7 +447,7 @@ struct cardstate { + spinlock_t cmdlock; + unsigned curlen, cmdbytes; + +- unsigned open_count; ++ local_t open_count; + struct tty_struct *tty; + struct tasklet_struct if_wake_tasklet; + unsigned control_state; +diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c +index b3065b8..c7e8cc9 100644 +--- a/drivers/isdn/gigaset/interface.c ++++ b/drivers/isdn/gigaset/interface.c +@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp) + return -ERESTARTSYS; // FIXME -EINTR? 
+ tty->driver_data = cs; + +- ++cs->open_count; +- +- if (cs->open_count == 1) { ++ if (local_inc_return(&cs->open_count) == 1) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = tty; + spin_unlock_irqrestore(&cs->lock, flags); +@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { +- if (!--cs->open_count) { ++ if (!local_dec_return(&cs->open_count)) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = NULL; + spin_unlock_irqrestore(&cs->lock, flags); +@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file, + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { + retval = 0; +@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) { + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty) + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) { + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); +- else if (!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { + //FIXME +@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { + //FIXME +@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old) + goto out; + } + +- if (!cs->open_count) { ++ if (!local_read(&cs->open_count)) { + dev_warn(cs->dev, "%s: device not opened\n", __func__); + goto out; + } +diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c +index a7c0083..62a7cb6 100644 +--- a/drivers/isdn/hardware/avm/b1.c ++++ b/drivers/isdn/hardware/avm/b1.c +@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file) + } + if (left) { + if (t4file->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof buf || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, 
capiloaddatapart * config) + } + if (left) { + if (config->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof buf || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c +index f130724..c373c68 100644 +--- a/drivers/isdn/hardware/eicon/capidtmf.c ++++ b/drivers/isdn/hardware/eicon/capidtmf.c +@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng + byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT]; + short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES]; + ++ pax_track_stack(); + + if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE) + { +diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c +index 4d425c6..a9be6c4 100644 +--- a/drivers/isdn/hardware/eicon/capifunc.c ++++ b/drivers/isdn/hardware/eicon/capifunc.c +@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void) + IDI_SYNC_REQ req; + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; + ++ pax_track_stack(); ++ + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); + + for (x = 0; x < MAX_DESCRIPTORS; x++) { +diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c +index 3029234..ef0d9e2 100644 +--- a/drivers/isdn/hardware/eicon/diddfunc.c ++++ b/drivers/isdn/hardware/eicon/diddfunc.c +@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) + IDI_SYNC_REQ req; + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; + ++ pax_track_stack(); ++ + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); + + for (x = 0; x < MAX_DESCRIPTORS; x++) { +diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c +index d36a4c0..11e7d1a 100644 +--- a/drivers/isdn/hardware/eicon/divasfunc.c ++++ b/drivers/isdn/hardware/eicon/divasfunc.c +@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) + IDI_SYNC_REQ req; + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; + ++ pax_track_stack(); ++ + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); + + for (x = 0; x < MAX_DESCRIPTORS; x++) { +diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h +index 85784a7..a19ca98 100644 +--- a/drivers/isdn/hardware/eicon/divasync.h ++++ b/drivers/isdn/hardware/eicon/divasync.h +@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter { + } diva_didd_add_adapter_t; + typedef struct _diva_didd_remove_adapter { + IDI_CALL p_request; +-} diva_didd_remove_adapter_t; ++} __no_const diva_didd_remove_adapter_t; + typedef struct _diva_didd_read_adapter_array { + void * buffer; + dword length; +diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c +index db87d51..7d09acf 100644 +--- a/drivers/isdn/hardware/eicon/idifunc.c ++++ b/drivers/isdn/hardware/eicon/idifunc.c +@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) + IDI_SYNC_REQ req; + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; + ++ pax_track_stack(); ++ + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); + + for (x = 0; x < MAX_DESCRIPTORS; x++) { +diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c +index ae89fb8..0fab299 100644 +--- a/drivers/isdn/hardware/eicon/message.c ++++ b/drivers/isdn/hardware/eicon/message.c +@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci) + dword d; + word w; + ++ pax_track_stack(); ++ + a = plci->adapter; + Id = ((word)plci->Id<<8)|a->Id; + 
PUT_WORD(&SS_Ind[4],0x0000); +@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info, + word j, n, w; + dword d; + ++ pax_track_stack(); ++ + + for(i=0;i<8;i++) bp_parms[i].length = 0; + for(i=0;i<2;i++) global_config[i].length = 0; +@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp) + const byte llc3[] = {4,3,2,2,6,6,0}; + const byte header[] = {0,2,3,3,0,0,0}; + ++ pax_track_stack(); ++ + for(i=0;i<8;i++) bp_parms[i].length = 0; + for(i=0;i<6;i++) b2_config_parms[i].length = 0; + for(i=0;i<5;i++) b3_config_parms[i].length = 0; +@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci) + word appl_number_group_type[MAX_APPL]; + PLCI *auxplci; + ++ pax_track_stack(); ++ + set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */ + + if(!a->group_optimization_enabled) +diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c +index a564b75..f3cf8b5 100644 +--- a/drivers/isdn/hardware/eicon/mntfunc.c ++++ b/drivers/isdn/hardware/eicon/mntfunc.c +@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) + IDI_SYNC_REQ req; + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; + ++ pax_track_stack(); ++ + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); + + for (x = 0; x < MAX_DESCRIPTORS; x++) { +diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h +index a3bd163..8956575 100644 +--- a/drivers/isdn/hardware/eicon/xdi_adapter.h ++++ b/drivers/isdn/hardware/eicon/xdi_adapter.h +@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t { + typedef struct _diva_os_idi_adapter_interface { + diva_init_card_proc_t cleanup_adapter_proc; + diva_cmd_card_proc_t cmd_proc; +-} diva_os_idi_adapter_interface_t; ++} __no_const diva_os_idi_adapter_interface_t; + + typedef struct _diva_os_xdi_adapter { + struct list_head link; +diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c +index adb1e8c..21b590b 100644 +--- a/drivers/isdn/i4l/isdn_common.c ++++ b/drivers/isdn/i4l/isdn_common.c +@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) + } iocpar; + void __user *argp = (void __user *)arg; + ++ pax_track_stack(); ++ + #define name iocpar.name + #define bname iocpar.bname + #define iocts iocpar.iocts +diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c +index 90b56ed..5ed3305 100644 +--- a/drivers/isdn/i4l/isdn_net.c ++++ b/drivers/isdn/i4l/isdn_net.c +@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev, + { + isdn_net_local *lp = netdev_priv(dev); + unsigned char *p; +- ushort len = 0; ++ int len = 0; + + switch (lp->p_encap) { + case ISDN_NET_ENCAP_ETHER: +diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c +index bf7997a..cf091db 100644 +--- a/drivers/isdn/icn/icn.c ++++ b/drivers/isdn/icn/icn.c +@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card) + if (count > len) + count = len; + if (user) { +- if (copy_from_user(msg, buf, count)) ++ if (count > sizeof msg || copy_from_user(msg, buf, count)) + return -EFAULT; + } else + memcpy(msg, buf, count); +diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c +index feb0fa4..f76f830 100644 +--- a/drivers/isdn/mISDN/socket.c ++++ b/drivers/isdn/mISDN/socket.c +@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + if (dev) { + struct mISDN_devinfo di; + ++ 
memset(&di, 0, sizeof(di)); + di.id = dev->id; + di.Dprotocols = dev->Dprotocols; + di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); +@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + if (dev) { + struct mISDN_devinfo di; + ++ memset(&di, 0, sizeof(di)); + di.id = dev->id; + di.Dprotocols = dev->Dprotocols; + di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); +diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c +index 485be8b..f0225bc 100644 +--- a/drivers/isdn/sc/interrupt.c ++++ b/drivers/isdn/sc/interrupt.c +@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) + } + else if(callid>=0x0000 && callid<=0x7FFF) + { ++ int len; ++ + pr_debug("%s: Got Incoming Call\n", + sc_adapter[card]->devicename); +- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4])); +- strcpy(setup.eazmsn, +- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn); ++ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]), ++ sizeof(setup.phone)); ++ if (len >= sizeof(setup.phone)) ++ continue; ++ len = strlcpy(setup.eazmsn, ++ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, ++ sizeof(setup.eazmsn)); ++ if (len >= sizeof(setup.eazmsn)) ++ continue; + setup.si1 = 7; + setup.si2 = 0; + setup.plan = 0; +@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) + * Handle a GetMyNumber Rsp + */ + if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){ +- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array); ++ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, ++ rcvmsg.msg_data.byte_array, ++ sizeof(rcvmsg.msg_data.byte_array)); + continue; + } + +diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c +index 8744d24..d1f9a9a 100644 +--- a/drivers/lguest/core.c ++++ b/drivers/lguest/core.c +@@ -91,9 +91,17 @@ static __init int map_switcher(void) + * it's worked so far. The end address needs +1 because __get_vm_area + * allocates an extra guard page, so we need space for that. + */ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, ++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR ++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#else + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, + VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#endif ++ + if (!switcher_vma) { + err = -ENOMEM; + printk("lguest: could not map switcher pages high\n"); +@@ -118,7 +126,7 @@ static __init int map_switcher(void) + * Now the Switcher is mapped at the right address, we can't fail! + * Copy in the compiled-in Switcher code (from <arch>_switcher.S). + */ +- memcpy(switcher_vma->addr, start_switcher_text, ++ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text), + end_switcher_text - start_switcher_text); + + printk(KERN_INFO "lguest: mapped switcher at %p\n", +diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c +index 6ae3888..8b38145 100644 +--- a/drivers/lguest/x86/core.c ++++ b/drivers/lguest/x86/core.c +@@ -59,7 +59,7 @@ static struct { + /* Offset from where switcher.S was compiled to where we've copied it */ + static unsigned long switcher_offset(void) + { +- return SWITCHER_ADDR - (unsigned long)start_switcher_text; ++ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text); + } + + /* This cpu's struct lguest_pages. 
*/ +@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) + * These copies are pretty cheap, so we do them unconditionally: */ + /* Save the current Host top-level page directory. + */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pages->state.host_cr3 = read_cr3(); ++#else + pages->state.host_cr3 = __pa(current->mm->pgd); ++#endif ++ + /* + * Set up the Guest's page tables to see this CPU's pages (and no + * other CPU's pages). +@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void) + * compiled-in switcher code and the high-mapped copy we just made. + */ + for (i = 0; i < IDT_ENTRIES; i++) +- default_idt_entries[i] += switcher_offset(); ++ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset(); + + /* + * Set up the Switcher's per-cpu areas. +@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void) + * it will be undisturbed when we switch. To change %cs and jump we + * need this structure to feed to Intel's "lcall" instruction. + */ +- lguest_entry.offset = (long)switch_to_guest + switcher_offset(); ++ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset(); + lguest_entry.segment = LGUEST_CS; + + /* +diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S +index 40634b0..4f5855e 100644 +--- a/drivers/lguest/x86/switcher_32.S ++++ b/drivers/lguest/x86/switcher_32.S +@@ -87,6 +87,7 @@ + #include <asm/page.h> + #include <asm/segment.h> + #include <asm/lguest.h> ++#include <asm/processor-flags.h> + + // We mark the start of the code to copy + // It's placed in .text tho it's never run here +@@ -149,6 +150,13 @@ ENTRY(switch_to_guest) + // Changes type when we load it: damn Intel! + // For after we switch over our page tables + // That entry will be read-only: we'd crash. ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %edx ++ xor $X86_CR0_WP, %edx ++ mov %edx, %cr0 ++#endif ++ + movl $(GDT_ENTRY_TSS*8), %edx + ltr %dx + +@@ -157,9 +165,15 @@ ENTRY(switch_to_guest) + // Let's clear it again for our return. + // The GDT descriptor of the Host + // Points to the table after two "size" bytes +- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx ++ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax + // Clear "used" from type field (byte 5, bit 2) +- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) ++ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %eax ++ xor $X86_CR0_WP, %eax ++ mov %eax, %cr0 ++#endif + + // Once our page table's switched, the Guest is live! + // The Host fades as we run this final step. +@@ -295,13 +309,12 @@ deliver_to_host: + // I consulted gcc, and it gave + // These instructions, which I gladly credit: + leal (%edx,%ebx,8), %eax +- movzwl (%eax),%edx +- movl 4(%eax), %eax +- xorw %ax, %ax +- orl %eax, %edx ++ movl 4(%eax), %edx ++ movw (%eax), %dx + // Now the address of the handler's in %edx + // We call it now: its "iret" drops us home. +- jmp *%edx ++ ljmp $__KERNEL_CS, $1f ++1: jmp *%edx + + // Every interrupt can come to us here + // But we must truly tell each apart. 
+diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c +index 588a5b0..b71db89 100644 +--- a/drivers/macintosh/macio_asic.c ++++ b/drivers/macintosh/macio_asic.c +@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev) + * MacIO is matched against any Apple ID, it's probe() function + * will then decide wether it applies or not + */ +-static const struct pci_device_id __devinitdata pci_ids [] = { { ++static const struct pci_device_id __devinitconst pci_ids [] = { { + .vendor = PCI_VENDOR_ID_APPLE, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, +diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c +index a348bb0..ecd9b3f 100644 +--- a/drivers/macintosh/via-pmu-backlight.c ++++ b/drivers/macintosh/via-pmu-backlight.c +@@ -15,7 +15,7 @@ + + #define MAX_PMU_LEVEL 0xFF + +-static struct backlight_ops pmu_backlight_data; ++static const struct backlight_ops pmu_backlight_data; + static DEFINE_SPINLOCK(pmu_backlight_lock); + static int sleeping, uses_pmu_bl; + static u8 bl_curve[FB_BACKLIGHT_LEVELS]; +@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd) + return bd->props.brightness; + } + +-static struct backlight_ops pmu_backlight_data = { ++static const struct backlight_ops pmu_backlight_data = { + .get_brightness = pmu_backlight_get_brightness, + .update_status = pmu_backlight_update_status, + +diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c +index 6f308a4..b5f7ff7 100644 +--- a/drivers/macintosh/via-pmu.c ++++ b/drivers/macintosh/via-pmu.c +@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state) + && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0); + } + +-static struct platform_suspend_ops pmu_pm_ops = { ++static const struct platform_suspend_ops pmu_pm_ops = { + .enter = powerbook_sleep, + .valid = pmu_sleep_valid, + }; +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 818b617..4656e38 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param) + cmd == DM_LIST_VERSIONS_CMD) + return 0; + +- if ((cmd == DM_DEV_CREATE_CMD)) { ++ if (cmd == DM_DEV_CREATE_CMD) { + if (!*param->name) { + DMWARN("name not supplied when creating device"); + return -EINVAL; +diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c +index f1c8cae..59f0822 100644 +--- a/drivers/md/dm-log-userspace-transfer.c ++++ b/drivers/md/dm-log-userspace-transfer.c +@@ -133,7 +133,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) + { + struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); + +- if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) ++ if (!capable(CAP_SYS_ADMIN)) + return; + + spin_lock(&receiving_list_lock); +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index 6021d0a..a878643 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -41,7 +41,7 @@ enum dm_raid1_error { + + struct mirror { + struct mirror_set *ms; +- atomic_t error_count; ++ atomic_unchecked_t error_count; + unsigned long error_type; + struct dm_dev *dev; + sector_t offset; +@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) + * simple way to tell if a device has encountered + * errors. 
+ */ +- atomic_inc(&m->error_count); ++ atomic_inc_unchecked(&m->error_count); + + if (test_and_set_bit(error_type, &m->error_type)) + return; +@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) + } + + for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++) +- if (!atomic_read(&new->error_count)) { ++ if (!atomic_read_unchecked(&new->error_count)) { + set_default_mirror(new); + break; + } +@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) + struct mirror *m = get_default_mirror(ms); + + do { +- if (likely(!atomic_read(&m->error_count))) ++ if (likely(!atomic_read_unchecked(&m->error_count))) + return m; + + if (m-- == ms->mirror) +@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m) + { + struct mirror *default_mirror = get_default_mirror(m->ms); + +- return !atomic_read(&default_mirror->error_count); ++ return !atomic_read_unchecked(&default_mirror->error_count); + } + + static int mirror_available(struct mirror_set *ms, struct bio *bio) +@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) + */ + if (likely(region_in_sync(ms, region, 1))) + m = choose_mirror(ms, bio->bi_sector); +- else if (m && atomic_read(&m->error_count)) ++ else if (m && atomic_read_unchecked(&m->error_count)) + m = NULL; + + if (likely(m)) +@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, + } + + ms->mirror[mirror].ms = ms; +- atomic_set(&(ms->mirror[mirror].error_count), 0); ++ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0); + ms->mirror[mirror].error_type = 0; + ms->mirror[mirror].offset = offset; + +@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti) + */ + static char device_status_char(struct mirror *m) + { +- if (!atomic_read(&(m->error_count))) ++ if (!atomic_read_unchecked(&(m->error_count))) + return 'A'; + + return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : +diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c +index bd58703..9f26571 100644 +--- a/drivers/md/dm-stripe.c ++++ b/drivers/md/dm-stripe.c +@@ -20,7 +20,7 @@ struct stripe { + struct dm_dev *dev; + sector_t physical_start; + +- atomic_t error_count; ++ atomic_unchecked_t error_count; + }; + + struct stripe_c { +@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) + kfree(sc); + return r; + } +- atomic_set(&(sc->stripe[i].error_count), 0); ++ atomic_set_unchecked(&(sc->stripe[i].error_count), 0); + } + + ti->private = sc; +@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti, + DMEMIT("%d ", sc->stripes); + for (i = 0; i < sc->stripes; i++) { + DMEMIT("%s ", sc->stripe[i].dev->name); +- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? ++ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ? 
+ 'D' : 'A'; + } + buffer[i] = '\0'; +@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, + */ + for (i = 0; i < sc->stripes; i++) + if (!strcmp(sc->stripe[i].dev->name, major_minor)) { +- atomic_inc(&(sc->stripe[i].error_count)); +- if (atomic_read(&(sc->stripe[i].error_count)) < ++ atomic_inc_unchecked(&(sc->stripe[i].error_count)); ++ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) < + DM_IO_ERROR_THRESHOLD) + queue_work(kstriped, &sc->kstriped_ws); + } +diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c +index 4b04590..13a77b2 100644 +--- a/drivers/md/dm-sysfs.c ++++ b/drivers/md/dm-sysfs.c +@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = { + NULL, + }; + +-static struct sysfs_ops dm_sysfs_ops = { ++static const struct sysfs_ops dm_sysfs_ops = { + .show = dm_attr_show, + }; + +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 03345bb..332250d 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, + if (!dev_size) + return 0; + +- if ((start >= dev_size) || (start + len > dev_size)) { ++ if ((start >= dev_size) || (len > dev_size - start)) { + DMWARN("%s: %s too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index c988ac2..c418141 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -165,9 +165,9 @@ struct mapped_device { + /* + * Event handling. + */ +- atomic_t event_nr; ++ atomic_unchecked_t event_nr; + wait_queue_head_t eventq; +- atomic_t uevent_seq; ++ atomic_unchecked_t uevent_seq; + struct list_head uevent_list; + spinlock_t uevent_lock; /* Protect access to uevent_list */ + +@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor) + rwlock_init(&md->map_lock); + atomic_set(&md->holders, 1); + atomic_set(&md->open_count, 0); +- atomic_set(&md->event_nr, 0); +- atomic_set(&md->uevent_seq, 0); ++ atomic_set_unchecked(&md->event_nr, 0); ++ atomic_set_unchecked(&md->uevent_seq, 0); + INIT_LIST_HEAD(&md->uevent_list); + spin_lock_init(&md->uevent_lock); + +@@ -1927,7 +1927,7 @@ static void event_callback(void *context) + + dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); + +- atomic_inc(&md->event_nr); ++ atomic_inc_unchecked(&md->event_nr); + wake_up(&md->eventq); + } + +@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, + + uint32_t dm_next_uevent_seq(struct mapped_device *md) + { +- return atomic_add_return(1, &md->uevent_seq); ++ return atomic_add_return_unchecked(1, &md->uevent_seq); + } + + uint32_t dm_get_event_nr(struct mapped_device *md) + { +- return atomic_read(&md->event_nr); ++ return atomic_read_unchecked(&md->event_nr); + } + + int dm_wait_event(struct mapped_device *md, int event_nr) + { + return wait_event_interruptible(md->eventq, +- (event_nr != atomic_read(&md->event_nr))); ++ (event_nr != atomic_read_unchecked(&md->event_nr))); + } + + void dm_uevent_add(struct mapped_device *md, struct list_head *elist) +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 4ce6e2f..7a9530a 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -153,10 +153,10 @@ static int start_readonly; + * start build, activate spare + */ + static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); +-static atomic_t md_event_count; ++static atomic_unchecked_t md_event_count; + void md_new_event(mddev_t *mddev) + { +- 
atomic_inc(&md_event_count); ++ atomic_inc_unchecked(&md_event_count); + wake_up(&md_event_waiters); + } + EXPORT_SYMBOL_GPL(md_new_event); +@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event); + */ + static void md_new_event_inintr(mddev_t *mddev) + { +- atomic_inc(&md_event_count); ++ atomic_inc_unchecked(&md_event_count); + wake_up(&md_event_waiters); + } + +@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) + + rdev->preferred_minor = 0xffff; + rdev->data_offset = le64_to_cpu(sb->data_offset); +- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); ++ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); + + rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; +@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) + else + sb->resync_offset = cpu_to_le64(0); + +- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); ++ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors)); + + sb->raid_disks = cpu_to_le32(mddev->raid_disks); + sb->size = cpu_to_le64(mddev->dev_sectors); +@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); + static ssize_t + errors_show(mdk_rdev_t *rdev, char *page) + { +- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); ++ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors)); + } + + static ssize_t +@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + if (*buf && (*e == 0 || *e == '\n')) { +- atomic_set(&rdev->corrected_errors, n); ++ atomic_set_unchecked(&rdev->corrected_errors, n); + return len; + } + return -EINVAL; +@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko) + mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); + kfree(rdev); + } +-static struct sysfs_ops rdev_sysfs_ops = { ++static const struct sysfs_ops rdev_sysfs_ops = { + .show = rdev_attr_show, + .store = rdev_attr_store, + }; +@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi + rdev->data_offset = 0; + rdev->sb_events = 0; + atomic_set(&rdev->nr_pending, 0); +- atomic_set(&rdev->read_errors, 0); +- atomic_set(&rdev->corrected_errors, 0); ++ atomic_set_unchecked(&rdev->read_errors, 0); ++ atomic_set_unchecked(&rdev->corrected_errors, 0); + + size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; + if (!size) { +@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko) + kfree(mddev); + } + +-static struct sysfs_ops md_sysfs_ops = { ++static const struct sysfs_ops md_sysfs_ops = { + .show = md_attr_show, + .store = md_attr_store, + }; +@@ -4482,7 +4482,8 @@ out: + err = 0; + blk_integrity_unregister(disk); + md_new_event(mddev); +- sysfs_notify_dirent(mddev->sysfs_state); ++ if (mddev->sysfs_state) ++ sysfs_notify_dirent(mddev->sysfs_state); + return err; + } + +@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v) + + spin_unlock(&pers_lock); + seq_printf(seq, "\n"); +- mi->event = atomic_read(&md_event_count); ++ mi->event = atomic_read_unchecked(&md_event_count); + return 0; + } + if (v == (void*)2) { +@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v) + chunk_kb ? 
"KB" : "B"); + if (bitmap->file) { + seq_printf(seq, ", file: "); +- seq_path(seq, &bitmap->file->f_path, " \t\n"); ++ seq_path(seq, &bitmap->file->f_path, " \t\n\\"); + } + + seq_printf(seq, "\n"); +@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file) + else { + struct seq_file *p = file->private_data; + p->private = mi; +- mi->event = atomic_read(&md_event_count); ++ mi->event = atomic_read_unchecked(&md_event_count); + } + return error; + } +@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait) + /* always allow read */ + mask = POLLIN | POLLRDNORM; + +- if (mi->event != atomic_read(&md_event_count)) ++ if (mi->event != atomic_read_unchecked(&md_event_count)) + mask |= POLLERR | POLLPRI; + return mask; + } +@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init) + struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + + (int)part_stat_read(&disk->part0, sectors[1]) - +- atomic_read(&disk->sync_io); ++ atomic_read_unchecked(&disk->sync_io); + /* sync IO will cause sync_io to increase before the disk_stats + * as sync_io is counted when a request starts, and + * disk_stats is counted when it completes. +diff --git a/drivers/md/md.h b/drivers/md/md.h +index 87430fe..0024a4c 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -94,10 +94,10 @@ struct mdk_rdev_s + * only maintained for arrays that + * support hot removal + */ +- atomic_t read_errors; /* number of consecutive read errors that ++ atomic_unchecked_t read_errors; /* number of consecutive read errors that + * we have tried to ignore. + */ +- atomic_t corrected_errors; /* number of corrected read errors, ++ atomic_unchecked_t corrected_errors; /* number of corrected read errors, + * for reporting to userspace and storing + * in superblock. 
+ */ +@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) + + static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) + { +- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); ++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + } + + struct mdk_personality +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 968cb14..f0ad2e4 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) + if (r1_bio->bios[d]->bi_end_io != end_sync_read) + continue; + rdev = conf->mirrors[d].rdev; +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + if (sync_page_io(rdev->bdev, + sect + rdev->data_offset, + s<<9, +@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk, + /* Well, this device is dead */ + md_error(mddev, rdev); + else { +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + printk(KERN_INFO + "raid1:%s: read error corrected " + "(%d sectors at %llu on %s)\n", +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 1b4e232..cf0f534b 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error) + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + set_bit(R10BIO_Uptodate, &r10_bio->state); + else { +- atomic_add(r10_bio->sectors, ++ atomic_add_unchecked(r10_bio->sectors, + &conf->mirrors[d].rdev->corrected_errors); + if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) + md_error(r10_bio->mddev, +@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) + test_bit(In_sync, &rdev->flags)) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + if (sync_page_io(rdev->bdev, + r10_bio->devs[sl].addr + + sect + rdev->data_offset, +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 883215d..675bf47 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) + bi->bi_next = NULL; + if ((rw & WRITE) && + test_bit(R5_ReWrite, &sh->dev[i].flags)) +- atomic_add(STRIPE_SECTORS, ++ atomic_add_unchecked(STRIPE_SECTORS, + &rdev->corrected_errors); + generic_make_request(bi); + } else { +@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error) + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + } +- if (atomic_read(&conf->disks[i].rdev->read_errors)) +- atomic_set(&conf->disks[i].rdev->read_errors, 0); ++ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors)) ++ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0); + } else { + const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); + int retry = 0; + rdev = conf->disks[i].rdev; + + clear_bit(R5_UPTODATE, &sh->dev[i].flags); +- atomic_inc(&rdev->read_errors); ++ atomic_inc_unchecked(&rdev->read_errors); + if (conf->mddev->degraded >= conf->max_degraded) + printk_rl(KERN_WARNING + "raid5:%s: read error not correctable " +@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error) + (unsigned long long)(sh->sector + + rdev->data_offset), + bdn); +- else if (atomic_read(&rdev->read_errors) ++ else if 
(atomic_read_unchecked(&rdev->read_errors) + > conf->max_nr_stripes) + printk(KERN_WARNING + "raid5:%s: Too many read errors, failing device %s.\n", +@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) + sector_t r_sector; + struct stripe_head sh2; + ++ pax_track_stack(); + + chunk_offset = sector_div(new_sector, sectors_per_chunk); + stripe = new_sector; +diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c +index 05bde9c..2f31d40 100644 +--- a/drivers/media/common/saa7146_hlp.c ++++ b/drivers/media/common/saa7146_hlp.c +@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa + + int x[32], y[32], w[32], h[32]; + ++ pax_track_stack(); ++ + /* clear out memory */ + memset(&line_list[0], 0x00, sizeof(u32)*32); + memset(&pixel_list[0], 0x00, sizeof(u32)*32); +diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c +index cb22da5..82b686e 100644 +--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c ++++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c +@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb + u8 buf[HOST_LINK_BUF_SIZE]; + int i; + ++ pax_track_stack(); ++ + dprintk("%s\n", __func__); + + /* check if we have space for a link buf in the rx_buffer */ +@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, + unsigned long timeout; + int written; + ++ pax_track_stack(); ++ + dprintk("%s\n", __func__); + + /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */ +diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h +index 2fe05d0..a3289c4 100644 +--- a/drivers/media/dvb/dvb-core/dvb_demux.h ++++ b/drivers/media/dvb/dvb-core/dvb_demux.h +@@ -71,7 +71,7 @@ struct dvb_demux_feed { + union { + dmx_ts_cb ts; + dmx_section_cb sec; +- } cb; ++ } __no_const cb; + + struct dvb_demux *demux; + void *priv; +diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c +index 94159b9..376bd8e 100644 +--- a/drivers/media/dvb/dvb-core/dvbdev.c ++++ b/drivers/media/dvb/dvb-core/dvbdev.c +@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, + const struct dvb_device *template, void *priv, int type) + { + struct dvb_device *dvbdev; +- struct file_operations *dvbdevfops; ++ file_operations_no_const *dvbdevfops; + struct device *clsdev; + int minor; + int id; +diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c +index 2a53dd0..db8c07a 100644 +--- a/drivers/media/dvb/dvb-usb/cxusb.c ++++ b/drivers/media/dvb/dvb-usb/cxusb.c +@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = { + struct dib0700_adapter_state { + int (*set_param_save) (struct dvb_frontend *, + struct dvb_frontend_parameters *); +-}; ++} __no_const; + + static int dib7070_set_param_override(struct dvb_frontend *fe, + struct dvb_frontend_parameters *fep) +diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c +index db7f7f7..f55e96f 100644 +--- a/drivers/media/dvb/dvb-usb/dib0700_core.c ++++ b/drivers/media/dvb/dvb-usb/dib0700_core.c +@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw + + u8 buf[260]; + ++ pax_track_stack(); ++ + while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) { + deb_fwdata("writing to address 0x%08x 
(buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk); + +diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c +index 524acf5..5ffc403 100644 +--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c ++++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c +@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif + + struct dib0700_adapter_state { + int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *); +-}; ++} __no_const; + + /* Hauppauge Nova-T 500 (aka Bristol) + * has a LNA on GPIO0 which is enabled by setting 1 */ +diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h +index ba91735..4261d84 100644 +--- a/drivers/media/dvb/frontends/dib3000.h ++++ b/drivers/media/dvb/frontends/dib3000.h +@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops + int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff); + int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff); + int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl); +-}; ++} __no_const; + + #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE)) + extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config, +diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c +index c709ce6..b3fe620 100644 +--- a/drivers/media/dvb/frontends/or51211.c ++++ b/drivers/media/dvb/frontends/or51211.c +@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe, + u8 tudata[585]; + int i; + ++ pax_track_stack(); ++ + dprintk("Firmware is %zd bytes\n",fw->size); + + /* Get eprom data */ +diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c +index 482d0f3..ee1e202 100644 +--- a/drivers/media/radio/radio-cadet.c ++++ b/drivers/media/radio/radio-cadet.c +@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo + while (i < count && dev->rdsin != dev->rdsout) + readbuf[i++] = dev->rdsbuf[dev->rdsout++]; + +- if (copy_to_user(data, readbuf, i)) ++ if (i > sizeof readbuf || copy_to_user(data, readbuf, i)) + return -EFAULT; + return i; + } +diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c +index 6dd51e2..0359b92 100644 +--- a/drivers/media/video/cx18/cx18-driver.c ++++ b/drivers/media/video/cx18/cx18-driver.c +@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = { + + MODULE_DEVICE_TABLE(pci, cx18_pci_tbl); + +-static atomic_t cx18_instance = ATOMIC_INIT(0); ++static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0); + + /* Parameter declarations */ + static int cardtype[CX18_MAX_CARDS]; +@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) + struct i2c_client c; + u8 eedata[256]; + ++ pax_track_stack(); ++ + memset(&c, 0, sizeof(c)); + strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name)); + c.adapter = &cx->i2c_adap[0]; +@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev, + struct cx18 *cx; + + /* FIXME - module parameter arrays constrain max instances */ +- i = atomic_inc_return(&cx18_instance) - 1; ++ i = atomic_inc_return_unchecked(&cx18_instance) - 1; + if (i >= CX18_MAX_CARDS) { + printk(KERN_ERR "cx18: cannot manage card %d, driver has a " + "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1); +diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c +index 463ec34..2f4625a 
100644 +--- a/drivers/media/video/ivtv/ivtv-driver.c ++++ b/drivers/media/video/ivtv/ivtv-driver.c +@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = { + MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); + + /* ivtv instance counter */ +-static atomic_t ivtv_instance = ATOMIC_INIT(0); ++static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0); + + /* Parameter declarations */ + static int cardtype[IVTV_MAX_CARDS]; +diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c +index 5fc4ac0..652a54a 100644 +--- a/drivers/media/video/omap24xxcam.c ++++ b/drivers/media/video/omap24xxcam.c +@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma, + spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags); + + do_gettimeofday(&vb->ts); +- vb->field_count = atomic_add_return(2, &fh->field_count); ++ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count); + if (csr & csr_error) { + vb->state = VIDEOBUF_ERROR; + if (!atomic_read(&fh->cam->in_reset)) { +diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h +index 2ce67f5..cf26a5b 100644 +--- a/drivers/media/video/omap24xxcam.h ++++ b/drivers/media/video/omap24xxcam.h +@@ -533,7 +533,7 @@ struct omap24xxcam_fh { + spinlock_t vbq_lock; /* spinlock for the videobuf queue */ + struct videobuf_queue vbq; + struct v4l2_pix_format pix; /* serialise pix by vbq->lock */ +- atomic_t field_count; /* field counter for videobuf_buffer */ ++ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */ + /* accessing cam here doesn't need serialisation: it's constant */ + struct omap24xxcam_device *cam; + }; +diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c +index 299afa4..eb47459 100644 +--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c ++++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c +@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw) + u8 *eeprom; + struct tveeprom tvdata; + ++ pax_track_stack(); ++ + memset(&tvdata,0,sizeof(tvdata)); + + eeprom = pvr2_eeprom_fetch(hdw); +diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h +index 5b152ff..3320638 100644 +--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h ++++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h +@@ -195,7 +195,7 @@ struct pvr2_hdw { + + /* I2C stuff */ + struct i2c_adapter i2c_adap; +- struct i2c_algorithm i2c_algo; ++ i2c_algorithm_no_const i2c_algo; + pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT]; + int i2c_cx25840_hack_state; + int i2c_linked; +diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c +index 1eabff6..8e2313a 100644 +--- a/drivers/media/video/saa7134/saa6752hs.c ++++ b/drivers/media/video/saa7134/saa6752hs.c +@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes) + unsigned char localPAT[256]; + unsigned char localPMT[256]; + ++ pax_track_stack(); ++ + /* Set video format - must be done first as it resets other settings */ + set_reg8(client, 0x41, h->video_format); + +diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c +index 9c1d3ac..b1b49e9 100644 +--- a/drivers/media/video/saa7164/saa7164-cmd.c ++++ b/drivers/media/video/saa7164/saa7164-cmd.c +@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev) + wait_queue_head_t *q = 0; + dprintk(DBGLVL_CMD, "%s()\n", __func__); + ++ 
pax_track_stack(); ++ + /* While any outstand message on the bus exists... */ + do { + +@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev) + u8 tmp[512]; + dprintk(DBGLVL_CMD, "%s()\n", __func__); + ++ pax_track_stack(); ++ + while (loop) { + + tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 }; +diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c +index b085496..cde0270 100644 +--- a/drivers/media/video/usbvideo/ibmcam.c ++++ b/drivers/media/video/usbvideo/ibmcam.c +@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = { + static int __init ibmcam_init(void) + { + struct usbvideo_cb cbTbl; +- memset(&cbTbl, 0, sizeof(cbTbl)); +- cbTbl.probe = ibmcam_probe; +- cbTbl.setupOnOpen = ibmcam_setup_on_open; +- cbTbl.videoStart = ibmcam_video_start; +- cbTbl.videoStop = ibmcam_video_stop; +- cbTbl.processData = ibmcam_ProcessIsocData; +- cbTbl.postProcess = usbvideo_DeinterlaceFrame; +- cbTbl.adjustPicture = ibmcam_adjust_picture; +- cbTbl.getFPS = ibmcam_calculate_fps; ++ memset((void *)&cbTbl, 0, sizeof(cbTbl)); ++ *(void **)&cbTbl.probe = ibmcam_probe; ++ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open; ++ *(void **)&cbTbl.videoStart = ibmcam_video_start; ++ *(void **)&cbTbl.videoStop = ibmcam_video_stop; ++ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData; ++ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame; ++ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture; ++ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps; + return usbvideo_register( + &cams, + MAX_IBMCAM, +diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c +index 31d57f2..600b735 100644 +--- a/drivers/media/video/usbvideo/konicawc.c ++++ b/drivers/media/video/usbvideo/konicawc.c +@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev + int error; + + usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname)); +- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname)); ++ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname)); + + cam->input = input_dev = input_allocate_device(); + if (!input_dev) { +@@ -935,16 +935,16 @@ static int __init konicawc_init(void) + struct usbvideo_cb cbTbl; + printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" + DRIVER_DESC "\n"); +- memset(&cbTbl, 0, sizeof(cbTbl)); +- cbTbl.probe = konicawc_probe; +- cbTbl.setupOnOpen = konicawc_setup_on_open; +- cbTbl.processData = konicawc_process_isoc; +- cbTbl.getFPS = konicawc_calculate_fps; +- cbTbl.setVideoMode = konicawc_set_video_mode; +- cbTbl.startDataPump = konicawc_start_data; +- cbTbl.stopDataPump = konicawc_stop_data; +- cbTbl.adjustPicture = konicawc_adjust_picture; +- cbTbl.userFree = konicawc_free_uvd; ++ memset((void * )&cbTbl, 0, sizeof(cbTbl)); ++ *(void **)&cbTbl.probe = konicawc_probe; ++ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open; ++ *(void **)&cbTbl.processData = konicawc_process_isoc; ++ *(void **)&cbTbl.getFPS = konicawc_calculate_fps; ++ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode; ++ *(void **)&cbTbl.startDataPump = konicawc_start_data; ++ *(void **)&cbTbl.stopDataPump = konicawc_stop_data; ++ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture; ++ *(void **)&cbTbl.userFree = konicawc_free_uvd; + return usbvideo_register( + &cams, + MAX_CAMERAS, +diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c +index 803d3e4..c4d1b96 100644 +--- 
a/drivers/media/video/usbvideo/quickcam_messenger.c ++++ b/drivers/media/video/usbvideo/quickcam_messenger.c +@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev) + int error; + + usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname)); +- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname)); ++ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname)); + + cam->input = input_dev = input_allocate_device(); + if (!input_dev) { +diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c +index fbd1b63..292f9f0 100644 +--- a/drivers/media/video/usbvideo/ultracam.c ++++ b/drivers/media/video/usbvideo/ultracam.c +@@ -655,14 +655,14 @@ static int __init ultracam_init(void) + { + struct usbvideo_cb cbTbl; + memset(&cbTbl, 0, sizeof(cbTbl)); +- cbTbl.probe = ultracam_probe; +- cbTbl.setupOnOpen = ultracam_setup_on_open; +- cbTbl.videoStart = ultracam_video_start; +- cbTbl.videoStop = ultracam_video_stop; +- cbTbl.processData = ultracam_ProcessIsocData; +- cbTbl.postProcess = usbvideo_DeinterlaceFrame; +- cbTbl.adjustPicture = ultracam_adjust_picture; +- cbTbl.getFPS = ultracam_calculate_fps; ++ *(void **)&cbTbl.probe = ultracam_probe; ++ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open; ++ *(void **)&cbTbl.videoStart = ultracam_video_start; ++ *(void **)&cbTbl.videoStop = ultracam_video_stop; ++ *(void **)&cbTbl.processData = ultracam_ProcessIsocData; ++ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame; ++ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture; ++ *(void **)&cbTbl.getFPS = ultracam_calculate_fps; + return usbvideo_register( + &cams, + MAX_CAMERAS, +diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c +index dea8b32..34f6878 100644 +--- a/drivers/media/video/usbvideo/usbvideo.c ++++ b/drivers/media/video/usbvideo/usbvideo.c +@@ -697,15 +697,15 @@ int usbvideo_register( + __func__, cams, base_size, num_cams); + + /* Copy callbacks, apply defaults for those that are not set */ +- memmove(&cams->cb, cbTbl, sizeof(cams->cb)); ++ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb)); + if (cams->cb.getFrame == NULL) +- cams->cb.getFrame = usbvideo_GetFrame; ++ *(void **)&cams->cb.getFrame = usbvideo_GetFrame; + if (cams->cb.disconnect == NULL) +- cams->cb.disconnect = usbvideo_Disconnect; ++ *(void **)&cams->cb.disconnect = usbvideo_Disconnect; + if (cams->cb.startDataPump == NULL) +- cams->cb.startDataPump = usbvideo_StartDataPump; ++ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump; + if (cams->cb.stopDataPump == NULL) +- cams->cb.stopDataPump = usbvideo_StopDataPump; ++ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump; + + cams->num_cameras = num_cams; + cams->cam = (struct uvd *) &cams[1]; +diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h +index c66985b..7fa143a 100644 +--- a/drivers/media/video/usbvideo/usbvideo.h ++++ b/drivers/media/video/usbvideo/usbvideo.h +@@ -268,7 +268,7 @@ struct usbvideo_cb { + int (*startDataPump)(struct uvd *uvd); + void (*stopDataPump)(struct uvd *uvd); + int (*setVideoMode)(struct uvd *uvd, struct video_window *vw); +-}; ++} __no_const; + + struct usbvideo { + int num_cameras; /* As allocated */ +diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c +index e0f91e4..37554ea 100644 +--- a/drivers/media/video/usbvision/usbvision-core.c ++++ 
b/drivers/media/video/usbvision/usbvision-core.c +@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision, + unsigned char rv, gv, bv; + static unsigned char *Y, *U, *V; + ++ pax_track_stack(); ++ + frame = usbvision->curFrame; + imageSize = frame->frmwidth * frame->frmheight; + if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) || +diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c +index 0d06e7c..3d17d24 100644 +--- a/drivers/media/video/v4l2-device.c ++++ b/drivers/media/video/v4l2-device.c +@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) + EXPORT_SYMBOL_GPL(v4l2_device_register); + + int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, +- atomic_t *instance) ++ atomic_unchecked_t *instance) + { +- int num = atomic_inc_return(instance) - 1; ++ int num = atomic_inc_return_unchecked(instance) - 1; + int len = strlen(basename); + + if (basename[len - 1] >= '0' && basename[len - 1] <= '9') +diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c +index 032ebae..6a3532c 100644 +--- a/drivers/media/video/videobuf-dma-sg.c ++++ b/drivers/media/video/videobuf-dma-sg.c +@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size) + { + struct videobuf_queue q; + ++ pax_track_stack(); ++ + /* Required to make generic handler to call __videobuf_alloc */ + q.int_ops = &sg_ops; + +diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c +index b6992b7..9fa7547 100644 +--- a/drivers/message/fusion/mptbase.c ++++ b/drivers/message/fusion/mptbase.c +@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo + len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); + len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", ++ NULL, NULL); ++#else + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", + (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); ++#endif ++ + /* + * Rounding UP to nearest 4-kB boundary here... 
+ */ +diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c +index 83873e3..e360e9a 100644 +--- a/drivers/message/fusion/mptsas.c ++++ b/drivers/message/fusion/mptsas.c +@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached) + return 0; + } + ++static inline void ++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) ++{ ++ if (phy_info->port_details) { ++ phy_info->port_details->rphy = rphy; ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", ++ ioc->name, rphy)); ++ } ++ ++ if (rphy) { ++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG, ++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", ++ ioc->name, rphy, rphy->dev.release)); ++ } ++} ++ + /* no mutex */ + static void + mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) +@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info) + return NULL; + } + +-static inline void +-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) +-{ +- if (phy_info->port_details) { +- phy_info->port_details->rphy = rphy; +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", +- ioc->name, rphy)); +- } +- +- if (rphy) { +- dsaswideprintk(ioc, dev_printk(KERN_DEBUG, +- &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", +- ioc->name, rphy, rphy->dev.release)); +- } +-} +- + static inline struct sas_port * + mptsas_get_port(struct mptsas_phyinfo *phy_info) + { +diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c +index bd096ca..332cf76 100644 +--- a/drivers/message/fusion/mptscsih.c ++++ b/drivers/message/fusion/mptscsih.c +@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost) + + h = shost_priv(SChost); + +- if (h) { +- if (h->info_kbuf == NULL) +- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) +- return h->info_kbuf; +- h->info_kbuf[0] = '\0'; ++ if (!h) ++ return NULL; + +- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); +- h->info_kbuf[size-1] = '\0'; +- } ++ if (h->info_kbuf == NULL) ++ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) ++ return h->info_kbuf; ++ h->info_kbuf[0] = '\0'; ++ ++ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); ++ h->info_kbuf[size-1] = '\0'; + + return h->info_kbuf; + } +diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c +index efba702..59b2c0f 100644 +--- a/drivers/message/i2o/i2o_config.c ++++ b/drivers/message/i2o/i2o_config.c +@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg) + struct i2o_message *msg; + unsigned int iop; + ++ pax_track_stack(); ++ + if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) + return -EFAULT; + +diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c +index 7045c45..c07b170 100644 +--- a/drivers/message/i2o/i2o_proc.c ++++ b/drivers/message/i2o/i2o_proc.c +@@ -259,13 +259,6 @@ static char *scsi_devices[] = { + "Array Controller Device" + }; + +-static char *chtostr(u8 * chars, int n) +-{ +- char tmp[256]; +- tmp[0] = 0; +- return strncat(tmp, (char *)chars, n); +-} +- + static int i2o_report_query_status(struct seq_file *seq, int block_status, + char *group) + { +@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) + + seq_printf(seq, "%-#7x", 
ddm_table.i2o_vendor_id); + seq_printf(seq, "%-#8x", ddm_table.module_id); +- seq_printf(seq, "%-29s", +- chtostr(ddm_table.module_name_version, 28)); ++ seq_printf(seq, "%-.28s", ddm_table.module_name_version); + seq_printf(seq, "%9d ", ddm_table.data_size); + seq_printf(seq, "%8d", ddm_table.code_size); + +@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) + + seq_printf(seq, "%-#7x", dst->i2o_vendor_id); + seq_printf(seq, "%-#8x", dst->module_id); +- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28)); +- seq_printf(seq, "%-9s", chtostr(dst->date, 8)); ++ seq_printf(seq, "%-.28s", dst->module_name_version); ++ seq_printf(seq, "%-.8s", dst->date); + seq_printf(seq, "%8d ", dst->module_size); + seq_printf(seq, "%8d ", dst->mpb_size); + seq_printf(seq, "0x%04x", dst->module_flags); +@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) + seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); + seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); + seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); +- seq_printf(seq, "Vendor info : %s\n", +- chtostr((u8 *) (work32 + 2), 16)); +- seq_printf(seq, "Product info : %s\n", +- chtostr((u8 *) (work32 + 6), 16)); +- seq_printf(seq, "Description : %s\n", +- chtostr((u8 *) (work32 + 10), 16)); +- seq_printf(seq, "Product rev. : %s\n", +- chtostr((u8 *) (work32 + 14), 8)); ++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2)); ++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6)); ++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10)); ++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14)); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, (u8 *) (work32 + 16), +@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) + } + + seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); +- seq_printf(seq, "Module name : %s\n", +- chtostr(result.module_name, 24)); +- seq_printf(seq, "Module revision : %s\n", +- chtostr(result.module_rev, 8)); ++ seq_printf(seq, "Module name : %.24s\n", result.module_name); ++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, result.serial_number, sizeof(result) - 36); +@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) + return 0; + } + +- seq_printf(seq, "Device name : %s\n", +- chtostr(result.device_name, 64)); +- seq_printf(seq, "Service name : %s\n", +- chtostr(result.service_name, 64)); +- seq_printf(seq, "Physical name : %s\n", +- chtostr(result.physical_location, 64)); +- seq_printf(seq, "Instance number : %s\n", +- chtostr(result.instance_number, 4)); ++ seq_printf(seq, "Device name : %.64s\n", result.device_name); ++ seq_printf(seq, "Service name : %.64s\n", result.service_name); ++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location); ++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number); + + return 0; + } +diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c +index 27cf4af..b1205b8 100644 +--- a/drivers/message/i2o/iop.c ++++ b/drivers/message/i2o/iop.c +@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) + + spin_lock_irqsave(&c->context_list_lock, flags); + +- if (unlikely(atomic_inc_and_test(&c->context_list_counter))) +- atomic_inc(&c->context_list_counter); ++ if 
(unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter))) ++ atomic_inc_unchecked(&c->context_list_counter); + +- entry->context = atomic_read(&c->context_list_counter); ++ entry->context = atomic_read_unchecked(&c->context_list_counter); + + list_add(&entry->list, &c->context_list); + +@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void) + + #if BITS_PER_LONG == 64 + spin_lock_init(&c->context_list_lock); +- atomic_set(&c->context_list_counter, 0); ++ atomic_set_unchecked(&c->context_list_counter, 0); + INIT_LIST_HEAD(&c->context_list); + #endif + +diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c +index 78e3e85..66c9a0d 100644 +--- a/drivers/mfd/ab3100-core.c ++++ b/drivers/mfd/ab3100-core.c +@@ -777,7 +777,7 @@ struct ab_family_id { + char *name; + }; + +-static const struct ab_family_id ids[] __initdata = { ++static const struct ab_family_id ids[] __initconst = { + /* AB3100 */ + { + .id = 0xc0, +diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c +index 8d8c932..8104515 100644 +--- a/drivers/mfd/wm8350-i2c.c ++++ b/drivers/mfd/wm8350-i2c.c +@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg, + u8 msg[(WM8350_MAX_REGISTER << 1) + 1]; + int ret; + ++ pax_track_stack(); ++ + if (bytes > ((WM8350_MAX_REGISTER << 1) + 1)) + return -EINVAL; + +diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c +index e4ff50b..4cc3f04 100644 +--- a/drivers/misc/kgdbts.c ++++ b/drivers/misc/kgdbts.c +@@ -118,7 +118,7 @@ + } while (0) + #define MAX_CONFIG_LEN 40 + +-static struct kgdb_io kgdbts_io_ops; ++static const struct kgdb_io kgdbts_io_ops; + static char get_buf[BUFMAX]; + static int get_buf_cnt; + static char put_buf[BUFMAX]; +@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void) + module_put(THIS_MODULE); + } + +-static struct kgdb_io kgdbts_io_ops = { ++static const struct kgdb_io kgdbts_io_ops = { + .name = "kgdbts", + .read_char = kgdbts_get_char, + .write_char = kgdbts_put_char, +diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c +index 37e7cfc..67cfb76 100644 +--- a/drivers/misc/sgi-gru/gruhandles.c ++++ b/drivers/misc/sgi-gru/gruhandles.c +@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last]; + + static void update_mcs_stats(enum mcs_op op, unsigned long clks) + { +- atomic_long_inc(&mcs_op_statistics[op].count); +- atomic_long_add(clks, &mcs_op_statistics[op].total); ++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count); ++ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total); + if (mcs_op_statistics[op].max < clks) + mcs_op_statistics[op].max = clks; + } +diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c +index 3f2375c..467c6e6 100644 +--- a/drivers/misc/sgi-gru/gruprocfs.c ++++ b/drivers/misc/sgi-gru/gruprocfs.c +@@ -32,9 +32,9 @@ + + #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) + +-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) ++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) + { +- unsigned long val = atomic_long_read(v); ++ unsigned long val = atomic_long_read_unchecked(v); + + if (val) + seq_printf(s, "%16lu %s\n", val, id); +@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p) + "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"}; + + for (op = 0; op < mcsop_last; op++) { +- count = atomic_long_read(&mcs_op_statistics[op].count); +- total = 
atomic_long_read(&mcs_op_statistics[op].total); ++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); ++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); + max = mcs_op_statistics[op].max; + seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, + count ? total / count : 0, max); +diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h +index 46990bc..4a251b5 100644 +--- a/drivers/misc/sgi-gru/grutables.h ++++ b/drivers/misc/sgi-gru/grutables.h +@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids; + * GRU statistics. + */ + struct gru_stats_s { +- atomic_long_t vdata_alloc; +- atomic_long_t vdata_free; +- atomic_long_t gts_alloc; +- atomic_long_t gts_free; +- atomic_long_t vdata_double_alloc; +- atomic_long_t gts_double_allocate; +- atomic_long_t assign_context; +- atomic_long_t assign_context_failed; +- atomic_long_t free_context; +- atomic_long_t load_user_context; +- atomic_long_t load_kernel_context; +- atomic_long_t lock_kernel_context; +- atomic_long_t unlock_kernel_context; +- atomic_long_t steal_user_context; +- atomic_long_t steal_kernel_context; +- atomic_long_t steal_context_failed; +- atomic_long_t nopfn; +- atomic_long_t break_cow; +- atomic_long_t asid_new; +- atomic_long_t asid_next; +- atomic_long_t asid_wrap; +- atomic_long_t asid_reuse; +- atomic_long_t intr; +- atomic_long_t intr_mm_lock_failed; +- atomic_long_t call_os; +- atomic_long_t call_os_offnode_reference; +- atomic_long_t call_os_check_for_bug; +- atomic_long_t call_os_wait_queue; +- atomic_long_t user_flush_tlb; +- atomic_long_t user_unload_context; +- atomic_long_t user_exception; +- atomic_long_t set_context_option; +- atomic_long_t migrate_check; +- atomic_long_t migrated_retarget; +- atomic_long_t migrated_unload; +- atomic_long_t migrated_unload_delay; +- atomic_long_t migrated_nopfn_retarget; +- atomic_long_t migrated_nopfn_unload; +- atomic_long_t tlb_dropin; +- atomic_long_t tlb_dropin_fail_no_asid; +- atomic_long_t tlb_dropin_fail_upm; +- atomic_long_t tlb_dropin_fail_invalid; +- atomic_long_t tlb_dropin_fail_range_active; +- atomic_long_t tlb_dropin_fail_idle; +- atomic_long_t tlb_dropin_fail_fmm; +- atomic_long_t tlb_dropin_fail_no_exception; +- atomic_long_t tlb_dropin_fail_no_exception_war; +- atomic_long_t tfh_stale_on_fault; +- atomic_long_t mmu_invalidate_range; +- atomic_long_t mmu_invalidate_page; +- atomic_long_t mmu_clear_flush_young; +- atomic_long_t flush_tlb; +- atomic_long_t flush_tlb_gru; +- atomic_long_t flush_tlb_gru_tgh; +- atomic_long_t flush_tlb_gru_zero_asid; ++ atomic_long_unchecked_t vdata_alloc; ++ atomic_long_unchecked_t vdata_free; ++ atomic_long_unchecked_t gts_alloc; ++ atomic_long_unchecked_t gts_free; ++ atomic_long_unchecked_t vdata_double_alloc; ++ atomic_long_unchecked_t gts_double_allocate; ++ atomic_long_unchecked_t assign_context; ++ atomic_long_unchecked_t assign_context_failed; ++ atomic_long_unchecked_t free_context; ++ atomic_long_unchecked_t load_user_context; ++ atomic_long_unchecked_t load_kernel_context; ++ atomic_long_unchecked_t lock_kernel_context; ++ atomic_long_unchecked_t unlock_kernel_context; ++ atomic_long_unchecked_t steal_user_context; ++ atomic_long_unchecked_t steal_kernel_context; ++ atomic_long_unchecked_t steal_context_failed; ++ atomic_long_unchecked_t nopfn; ++ atomic_long_unchecked_t break_cow; ++ atomic_long_unchecked_t asid_new; ++ atomic_long_unchecked_t asid_next; ++ atomic_long_unchecked_t asid_wrap; ++ atomic_long_unchecked_t asid_reuse; ++ atomic_long_unchecked_t intr; ++ 
atomic_long_unchecked_t intr_mm_lock_failed; ++ atomic_long_unchecked_t call_os; ++ atomic_long_unchecked_t call_os_offnode_reference; ++ atomic_long_unchecked_t call_os_check_for_bug; ++ atomic_long_unchecked_t call_os_wait_queue; ++ atomic_long_unchecked_t user_flush_tlb; ++ atomic_long_unchecked_t user_unload_context; ++ atomic_long_unchecked_t user_exception; ++ atomic_long_unchecked_t set_context_option; ++ atomic_long_unchecked_t migrate_check; ++ atomic_long_unchecked_t migrated_retarget; ++ atomic_long_unchecked_t migrated_unload; ++ atomic_long_unchecked_t migrated_unload_delay; ++ atomic_long_unchecked_t migrated_nopfn_retarget; ++ atomic_long_unchecked_t migrated_nopfn_unload; ++ atomic_long_unchecked_t tlb_dropin; ++ atomic_long_unchecked_t tlb_dropin_fail_no_asid; ++ atomic_long_unchecked_t tlb_dropin_fail_upm; ++ atomic_long_unchecked_t tlb_dropin_fail_invalid; ++ atomic_long_unchecked_t tlb_dropin_fail_range_active; ++ atomic_long_unchecked_t tlb_dropin_fail_idle; ++ atomic_long_unchecked_t tlb_dropin_fail_fmm; ++ atomic_long_unchecked_t tlb_dropin_fail_no_exception; ++ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war; ++ atomic_long_unchecked_t tfh_stale_on_fault; ++ atomic_long_unchecked_t mmu_invalidate_range; ++ atomic_long_unchecked_t mmu_invalidate_page; ++ atomic_long_unchecked_t mmu_clear_flush_young; ++ atomic_long_unchecked_t flush_tlb; ++ atomic_long_unchecked_t flush_tlb_gru; ++ atomic_long_unchecked_t flush_tlb_gru_tgh; ++ atomic_long_unchecked_t flush_tlb_gru_zero_asid; + +- atomic_long_t copy_gpa; ++ atomic_long_unchecked_t copy_gpa; + +- atomic_long_t mesq_receive; +- atomic_long_t mesq_receive_none; +- atomic_long_t mesq_send; +- atomic_long_t mesq_send_failed; +- atomic_long_t mesq_noop; +- atomic_long_t mesq_send_unexpected_error; +- atomic_long_t mesq_send_lb_overflow; +- atomic_long_t mesq_send_qlimit_reached; +- atomic_long_t mesq_send_amo_nacked; +- atomic_long_t mesq_send_put_nacked; +- atomic_long_t mesq_qf_not_full; +- atomic_long_t mesq_qf_locked; +- atomic_long_t mesq_qf_noop_not_full; +- atomic_long_t mesq_qf_switch_head_failed; +- atomic_long_t mesq_qf_unexpected_error; +- atomic_long_t mesq_noop_unexpected_error; +- atomic_long_t mesq_noop_lb_overflow; +- atomic_long_t mesq_noop_qlimit_reached; +- atomic_long_t mesq_noop_amo_nacked; +- atomic_long_t mesq_noop_put_nacked; ++ atomic_long_unchecked_t mesq_receive; ++ atomic_long_unchecked_t mesq_receive_none; ++ atomic_long_unchecked_t mesq_send; ++ atomic_long_unchecked_t mesq_send_failed; ++ atomic_long_unchecked_t mesq_noop; ++ atomic_long_unchecked_t mesq_send_unexpected_error; ++ atomic_long_unchecked_t mesq_send_lb_overflow; ++ atomic_long_unchecked_t mesq_send_qlimit_reached; ++ atomic_long_unchecked_t mesq_send_amo_nacked; ++ atomic_long_unchecked_t mesq_send_put_nacked; ++ atomic_long_unchecked_t mesq_qf_not_full; ++ atomic_long_unchecked_t mesq_qf_locked; ++ atomic_long_unchecked_t mesq_qf_noop_not_full; ++ atomic_long_unchecked_t mesq_qf_switch_head_failed; ++ atomic_long_unchecked_t mesq_qf_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_lb_overflow; ++ atomic_long_unchecked_t mesq_noop_qlimit_reached; ++ atomic_long_unchecked_t mesq_noop_amo_nacked; ++ atomic_long_unchecked_t mesq_noop_put_nacked; + + }; + +@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, + cchop_deallocate, tghop_invalidate, mcsop_last}; + + struct mcs_op_statistic { +- atomic_long_t count; +- 
atomic_long_t total; ++ atomic_long_unchecked_t count; ++ atomic_long_unchecked_t total; + unsigned long max; + }; + +@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; + + #define STAT(id) do { \ + if (gru_options & OPT_STATS) \ +- atomic_long_inc(&gru_stats.id); \ ++ atomic_long_inc_unchecked(&gru_stats.id); \ + } while (0) + + #ifdef CONFIG_SGI_GRU_DEBUG +diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h +index 2275126..12a9dbfb 100644 +--- a/drivers/misc/sgi-xp/xp.h ++++ b/drivers/misc/sgi-xp/xp.h +@@ -289,7 +289,7 @@ struct xpc_interface { + xpc_notify_func, void *); + void (*received) (short, int, void *); + enum xp_retval (*partid_to_nasids) (short, void *); +-}; ++} __no_const; + + extern struct xpc_interface xpc_interface; + +diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h +index b94d5f7..7f494c5 100644 +--- a/drivers/misc/sgi-xp/xpc.h ++++ b/drivers/misc/sgi-xp/xpc.h +@@ -835,6 +835,7 @@ struct xpc_arch_operations { + void (*received_payload) (struct xpc_channel *, void *); + void (*notify_senders_of_disconnect) (struct xpc_channel *); + }; ++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const; + + /* struct xpc_partition act_state values (for XPC HB) */ + +@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[]; + /* found in xpc_main.c */ + extern struct device *xpc_part; + extern struct device *xpc_chan; +-extern struct xpc_arch_operations xpc_arch_ops; ++extern xpc_arch_operations_no_const xpc_arch_ops; + extern int xpc_disengage_timelimit; + extern int xpc_disengage_timedout; + extern int xpc_activate_IRQ_rcvd; +diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c +index fd3688a..7e211a4 100644 +--- a/drivers/misc/sgi-xp/xpc_main.c ++++ b/drivers/misc/sgi-xp/xpc_main.c +@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = { + .notifier_call = xpc_system_die, + }; + +-struct xpc_arch_operations xpc_arch_ops; ++xpc_arch_operations_no_const xpc_arch_ops; + + /* + * Timer function to enforce the timelimit on the partition disengage. 
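
The hunks immediately above and below show the PaX "constify" pattern end to end: the one ops table that must remain assignable (xpc_arch_ops) is retyped through the __no_const typedef, every other struct xpc_arch_operations instance becomes const and can live in read-only memory, and the single legitimate assignment is rewritten as a memcpy() bracketed by pax_open_kernel()/pax_close_kernel(). A minimal userspace sketch of the same idea, substituting POSIX mmap()/mprotect() for the PaX helpers — the demo_ops names are illustrative and not part of the patch:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	struct demo_ops {
		void (*hello)(void);	/* stand-in for an xpc_arch_operations slot */
	};

	static void say_hello(void) { puts("hello via pinned ops table"); }

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		struct demo_ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
					    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (ops == MAP_FAILED)
			return 1;

		ops->hello = say_hello;			/* one-time setup ...     */
		mprotect(ops, pagesz, PROT_READ);	/* ... then pin read-only */

		/* A later legitimate update must reopen the window explicitly,
		 * like the memcpy() between pax_open_kernel() and
		 * pax_close_kernel() in xpc_init_sn2()/xpc_init_uv() below. */
		mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
		ops->hello = say_hello;
		mprotect(ops, pagesz, PROT_READ);

		ops->hello();	/* calls fine; a stray write to ops->hello now faults */
		return 0;
	}

The point of the exercise is that an attacker with an arbitrary kernel write can no longer silently redirect a function pointer in such a table; any write outside an explicit open/close window takes a fault instead.
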
+diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c +index 8b70e03..700bda6 100644 +--- a/drivers/misc/sgi-xp/xpc_sn2.c ++++ b/drivers/misc/sgi-xp/xpc_sn2.c +@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) + xpc_acknowledge_msgs_sn2(ch, get, msg->flags); + } + +-static struct xpc_arch_operations xpc_arch_ops_sn2 = { ++static const struct xpc_arch_operations xpc_arch_ops_sn2 = { + .setup_partitions = xpc_setup_partitions_sn2, + .teardown_partitions = xpc_teardown_partitions_sn2, + .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2, +@@ -2413,7 +2413,9 @@ xpc_init_sn2(void) + int ret; + size_t buf_size; + +- xpc_arch_ops = xpc_arch_ops_sn2; ++ pax_open_kernel(); ++ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2)); ++ pax_close_kernel(); + + if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) { + dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is " +diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c +index 8e08d71..7cb8c9b 100644 +--- a/drivers/misc/sgi-xp/xpc_uv.c ++++ b/drivers/misc/sgi-xp/xpc_uv.c +@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload) + XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); + } + +-static struct xpc_arch_operations xpc_arch_ops_uv = { ++static const struct xpc_arch_operations xpc_arch_ops_uv = { + .setup_partitions = xpc_setup_partitions_uv, + .teardown_partitions = xpc_teardown_partitions_uv, + .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, +@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = { + int + xpc_init_uv(void) + { +- xpc_arch_ops = xpc_arch_ops_uv; ++ pax_open_kernel(); ++ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv)); ++ pax_close_kernel(); + + if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { + dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", +diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c +index 6fd20b42..650efe3 100644 +--- a/drivers/mmc/host/sdhci-pci.c ++++ b/drivers/mmc/host/sdhci-pci.c +@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = { + .probe = via_probe, + }; + +-static const struct pci_device_id pci_ids[] __devinitdata = { ++static const struct pci_device_id pci_ids[] __devinitconst = { + { + .vendor = PCI_VENDOR_ID_RICOH, + .device = PCI_DEVICE_ID_RICOH_R5C822, +diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c +index e7563a9..5f90ce5 100644 +--- a/drivers/mtd/chips/cfi_cmdset_0001.c ++++ b/drivers/mtd/chips/cfi_cmdset_0001.c +@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long + struct cfi_pri_intelext *cfip = cfi->cmdset_priv; + unsigned long timeo = jiffies + HZ; + ++ pax_track_stack(); ++ + /* Prevent setting state FL_SYNCING for chip in suspended state. 
*/ + if (mode == FL_SYNCING && chip->oldstate != FL_READY) + goto sleep; +@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, + unsigned long initial_adr; + int initial_len = len; + ++ pax_track_stack(); ++ + wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; + adr += chip->start; + initial_adr = adr; +@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, + int retries = 3; + int ret; + ++ pax_track_stack(); ++ + adr += chip->start; + + retry: +diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c +index 0667a67..3ab97ed 100644 +--- a/drivers/mtd/chips/cfi_cmdset_0020.c ++++ b/drivers/mtd/chips/cfi_cmdset_0020.c +@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof + unsigned long cmd_addr; + struct cfi_private *cfi = map->fldrv_priv; + ++ pax_track_stack(); ++ + adr += chip->start; + + /* Ensure cmd read/writes are aligned. */ +@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, + DECLARE_WAITQUEUE(wait, current); + int wbufsize, z; + ++ pax_track_stack(); ++ + /* M58LW064A requires bus alignment for buffer wriets -- saw */ + if (adr & (map_bankwidth(map)-1)) + return -EINVAL; +@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u + DECLARE_WAITQUEUE(wait, current); + int ret = 0; + ++ pax_track_stack(); ++ + adr += chip->start; + + /* Let's determine this according to the interleave only once */ +@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un + unsigned long timeo = jiffies + HZ; + DECLARE_WAITQUEUE(wait, current); + ++ pax_track_stack(); ++ + adr += chip->start; + + /* Let's determine this according to the interleave only once */ +@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, + unsigned long timeo = jiffies + HZ; + DECLARE_WAITQUEUE(wait, current); + ++ pax_track_stack(); ++ + adr += chip->start; + + /* Let's determine this according to the interleave only once */ +diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c +index 5bf5f46..c5de373 100644 +--- a/drivers/mtd/devices/doc2000.c ++++ b/drivers/mtd/devices/doc2000.c +@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, + + /* The ECC will not be calculated correctly if less than 512 is written */ + /* DBB- +- if (len != 0x200 && eccbuf) ++ if (len != 0x200) + printk(KERN_WARNING + "ECC needs a full sector write (adr: %lx size %lx)\n", + (long) to, (long) len); +diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c +index 0990f78..bb4e8a4 100644 +--- a/drivers/mtd/devices/doc2001.c ++++ b/drivers/mtd/devices/doc2001.c +@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, + struct Nand *mychip = &this->chips[from >> (this->chipshift)]; + + /* Don't allow read past end of device */ +- if (from >= this->totlen) ++ if (from >= this->totlen || !len) + return -EINVAL; + + /* Don't allow a single read to cross a 512-byte block boundary */ +diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c +index e56d6b4..f07e6cf 100644 +--- a/drivers/mtd/ftl.c ++++ b/drivers/mtd/ftl.c +@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit, + loff_t offset; + uint16_t srcunitswap = cpu_to_le16(srcunit); + ++ pax_track_stack(); ++ + 
eun = &part->EUNInfo[srcunit]; + xfer = &part->XferInfo[xferunit]; + DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n", +diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c +index 8aca552..146446e 100755 +--- a/drivers/mtd/inftlcore.c ++++ b/drivers/mtd/inftlcore.c +@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned + struct inftl_oob oob; + size_t retlen; + ++ pax_track_stack(); ++ + DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d," + "pending=%d)\n", inftl, thisVUC, pendingblock); + +diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c +index 32e82ae..ed50953 100644 +--- a/drivers/mtd/inftlmount.c ++++ b/drivers/mtd/inftlmount.c +@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl) + struct INFTLPartition *ip; + size_t retlen; + ++ pax_track_stack(); ++ + DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl); + + /* +diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c +index 79bf40f..fe5f8fd 100644 +--- a/drivers/mtd/lpddr/qinfo_probe.c ++++ b/drivers/mtd/lpddr/qinfo_probe.c +@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr) + { + map_word pfow_val[4]; + ++ pax_track_stack(); ++ + /* Check identification string */ + pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P); + pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F); +diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c +index 726a1b8..f46b460 100644 +--- a/drivers/mtd/mtdchar.c ++++ b/drivers/mtd/mtdchar.c +@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file, + u_long size; + struct mtd_info_user info; + ++ pax_track_stack(); ++ + DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); + + size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; +diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c +index 1002e18..26d82d5 100644 +--- a/drivers/mtd/nftlcore.c ++++ b/drivers/mtd/nftlcore.c +@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p + int inplace = 1; + size_t retlen; + ++ pax_track_stack(); ++ + memset(BlockMap, 0xff, sizeof(BlockMap)); + memset(BlockFreeFound, 0, sizeof(BlockFreeFound)); + +diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c +index 8b22b18..6fada85 100644 +--- a/drivers/mtd/nftlmount.c ++++ b/drivers/mtd/nftlmount.c +@@ -23,6 +23,7 @@ + #include <asm/errno.h> + #include <linux/delay.h> + #include <linux/slab.h> ++#include <linux/sched.h> + #include <linux/mtd/mtd.h> + #include <linux/mtd/nand.h> + #include <linux/mtd/nftl.h> +@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl) + struct mtd_info *mtd = nftl->mbd.mtd; + unsigned int i; + ++ pax_track_stack(); ++ + /* Assume logical EraseSize == physical erasesize for starting the scan. + We'll sort it out later if we find a MediaHeader which says otherwise */ + /* Actually, we won't. 
The new DiskOnChip driver has already scanned +diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c +index ab68886..ca405e8 100644 +--- a/drivers/net/atlx/atl2.c ++++ b/drivers/net/atlx/atl2.c +@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw) + */ + + #define ATL2_PARAM(X, desc) \ +- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ ++ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); + #else +diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile +index a60cd80..0ed11ef 100644 +--- a/drivers/net/benet/Makefile ++++ b/drivers/net/benet/Makefile +@@ -1,7 +1,9 @@ + # +-# Makefile to build the network driver for ServerEngine's BladeEngine. ++# Makefile to build the be2net network driver + # + ++EXTRA_CFLAGS += -DCONFIG_PALAU ++ + obj-$(CONFIG_BE2NET) += be2net.o + +-be2net-y := be_main.o be_cmds.o be_ethtool.o ++be2net-y := be_main.o be_cmds.o be_ethtool.o be_compat.o be_misc.o +diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h +index 5c74ff0..7382603 100644 +--- a/drivers/net/benet/be.h ++++ b/drivers/net/benet/be.h +@@ -1,18 +1,18 @@ + /* +- * Copyright (C) 2005 - 2009 ServerEngines ++ * Copyright (C) 2005 - 2011 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation. The full GNU General ++ * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: +- * linux-drivers@serverengines.com ++ * linux-drivers@emulex.com + * +- * ServerEngines +- * 209 N. Fair Oaks Ave +- * Sunnyvale, CA 94085 ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 + */ + + #ifndef BE_H +@@ -29,32 +29,53 @@ + #include <linux/workqueue.h> + #include <linux/interrupt.h> + #include <linux/firmware.h> ++#include <linux/jhash.h> ++#ifndef CONFIG_PALAU ++#include <linux/inet_lro.h> ++#endif + ++#ifdef CONFIG_PALAU ++#include "be_compat.h" ++#endif + #include "be_hw.h" + +-#define DRV_VER "2.101.205" ++#ifdef CONFIG_PALAU ++#include "version.h" ++#define DRV_VER STR_BE_MAJOR "." STR_BE_MINOR "."\ ++ STR_BE_BUILD "." 
STR_BE_BRANCH ++#else ++#define DRV_VER "2.0.348" ++#endif + #define DRV_NAME "be2net" +-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" +-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" +-#define OC_NAME "Emulex OneConnect 10Gbps NIC" +-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)" +-#define DRV_DESC BE_NAME "Driver" ++#define BE_NAME "Emulex BladeEngine2" ++#define BE3_NAME "Emulex BladeEngine3" ++#define OC_NAME "Emulex OneConnect" ++#define OC_NAME_BE OC_NAME "(be3)" ++#define OC_NAME_LANCER OC_NAME "(Lancer)" ++#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" + +-#define BE_VENDOR_ID 0x19a2 ++#define BE_VENDOR_ID 0x19a2 ++#define EMULEX_VENDOR_ID 0x10df + #define BE_DEVICE_ID1 0x211 + #define BE_DEVICE_ID2 0x221 +-#define OC_DEVICE_ID1 0x700 +-#define OC_DEVICE_ID2 0x701 +-#define OC_DEVICE_ID3 0x710 ++#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */ ++#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */ ++#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */ ++ ++#define OC_SUBSYS_DEVICE_ID1 0xE602 ++#define OC_SUBSYS_DEVICE_ID2 0xE642 ++#define OC_SUBSYS_DEVICE_ID3 0xE612 ++#define OC_SUBSYS_DEVICE_ID4 0xE652 + + static inline char *nic_name(struct pci_dev *pdev) + { + switch (pdev->device) { + case OC_DEVICE_ID1: +- case OC_DEVICE_ID2: + return OC_NAME; ++ case OC_DEVICE_ID2: ++ return OC_NAME_BE; + case OC_DEVICE_ID3: +- return OC_NAME1; ++ return OC_NAME_LANCER; + case BE_DEVICE_ID2: + return BE3_NAME; + default: +@@ -63,7 +84,7 @@ static inline char *nic_name(struct pci_dev *pdev) + } + + /* Number of bytes of an RX frame that are copied to skb->data */ +-#define BE_HDR_LEN 64 ++#define BE_HDR_LEN ((u16) 64) + #define BE_MAX_JUMBO_FRAME_SIZE 9018 + #define BE_MIN_MTU 256 + +@@ -79,10 +100,24 @@ static inline char *nic_name(struct pci_dev *pdev) + #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ + #define MCC_CQ_LEN 256 + ++#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */ ++ ++#define MAX_RX_QS (MAX_RSS_QS + 1) ++ ++#ifdef MQ_TX ++#define MAX_TX_QS 8 ++#else ++#define MAX_TX_QS 1 ++#endif ++ ++#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RSS qs + 1 def Rx + Tx */ + #define BE_NAPI_WEIGHT 64 +-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ ++#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ + #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) + ++#define BE_MAX_LRO_DESCRIPTORS 16 ++#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS)) ++ + #define FW_VER_LEN 32 + + struct be_dma_mem { +@@ -127,6 +162,11 @@ static inline void *queue_tail_node(struct be_queue_info *q) + return q->dma_mem.va + q->tail * q->entry_size; + } + ++static inline void *queue_index_node(struct be_queue_info *q, u16 index) ++{ ++ return q->dma_mem.va + index * q->entry_size; ++} ++ + static inline void queue_head_inc(struct be_queue_info *q) + { + index_inc(&q->head, q->len); +@@ -137,6 +177,7 @@ static inline void queue_tail_inc(struct be_queue_info *q) + index_inc(&q->tail, q->len); + } + ++ + struct be_eq_obj { + struct be_queue_info q; + char desc[32]; +@@ -146,6 +187,7 @@ struct be_eq_obj { + u16 min_eqd; /* in usecs */ + u16 max_eqd; /* in usecs */ + u16 cur_eqd; /* in usecs */ ++ u8 eq_idx; + + struct napi_struct napi; + }; +@@ -153,49 +195,20 @@ struct be_eq_obj { + struct be_mcc_obj { + struct be_queue_info q; + struct be_queue_info cq; ++ bool rearm_cq; + }; + +-struct be_drvr_stats { ++struct be_tx_stats { + u32 be_tx_reqs; /* number of TX requests initiated */ + u32 be_tx_stops; /* 
number of times TX Q was stopped */ +- u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */ + u32 be_tx_wrbs; /* number of tx WRBs used */ +- u32 be_tx_events; /* number of tx completion events */ + u32 be_tx_compl; /* number of tx completion entries processed */ + ulong be_tx_jiffies; + u64 be_tx_bytes; + u64 be_tx_bytes_prev; + u64 be_tx_pkts; + u32 be_tx_rate; +- +- u32 cache_barrier[16]; +- +- u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */ +- u32 be_polls; /* number of times NAPI called poll function */ +- u32 be_rx_events; /* number of ucast rx completion events */ +- u32 be_rx_compl; /* number of rx completion entries processed */ +- ulong be_rx_jiffies; +- u64 be_rx_bytes; +- u64 be_rx_bytes_prev; +- u64 be_rx_pkts; +- u32 be_rx_rate; +- /* number of non ether type II frames dropped where +- * frame len > length field of Mac Hdr */ +- u32 be_802_3_dropped_frames; +- /* number of non ether type II frames malformed where +- * in frame len < length field of Mac Hdr */ +- u32 be_802_3_malformed_frames; +- u32 be_rxcp_err; /* Num rx completion entries w/ err set. */ +- ulong rx_fps_jiffies; /* jiffies at last FPS calc */ +- u32 be_rx_frags; +- u32 be_prev_rx_frags; +- u32 be_rx_fps; /* Rx frags per second */ +-}; +- +-struct be_stats_obj { +- struct be_drvr_stats drvr_stats; +- struct net_device_stats net_stats; +- struct be_dma_mem cmd; ++ u32 be_ipv6_ext_hdr_tx_drop; + }; + + struct be_tx_obj { +@@ -203,23 +216,124 @@ struct be_tx_obj { + struct be_queue_info cq; + /* Remember the skbs that were transmitted */ + struct sk_buff *sent_skb_list[TX_Q_LEN]; ++ struct be_tx_stats stats; + }; + + /* Struct to remember the pages posted for rx frags */ + struct be_rx_page_info { + struct page *page; +- dma_addr_t bus; ++ DEFINE_DMA_UNMAP_ADDR(bus); + u16 page_offset; + bool last_page_user; + }; + ++struct be_rx_stats { ++ u32 rx_post_fail;/* number of ethrx buffer alloc failures */ ++ u32 rx_polls; /* number of times NAPI called poll function */ ++ u32 rx_events; /* number of ucast rx completion events */ ++ u32 rx_compl; /* number of rx completion entries processed */ ++ ulong rx_jiffies; ++ u64 rx_bytes; ++ u64 rx_bytes_prev; ++ u64 rx_pkts; ++ u32 rx_rate; ++ u32 rx_mcast_pkts; ++ u32 rxcp_err; /* Num rx completion entries w/ err set. 
*/ ++ ulong rx_fps_jiffies; /* jiffies at last FPS calc */ ++ u32 rx_frags; ++ u32 prev_rx_frags; ++ u32 rx_fps; /* Rx frags per second */ ++ u32 rx_drops_no_frags; ++}; ++ ++struct be_rx_compl_info { ++ u32 rss_hash; ++ u16 vlan_tag; ++ u16 pkt_size; ++ u16 rxq_idx; ++ u16 port; ++ u8 vlanf; ++ u8 num_rcvd; ++ u8 err; ++ u8 ipf; ++ u8 tcpf; ++ u8 udpf; ++ u8 ip_csum; ++ u8 l4_csum; ++ u8 ipv6; ++ u8 vtm; ++ u8 pkt_type; ++}; ++ + struct be_rx_obj { ++ struct be_adapter *adapter; + struct be_queue_info q; + struct be_queue_info cq; +- struct be_rx_page_info page_info_tbl[RX_Q_LEN]; ++ struct be_rx_compl_info rxcp; ++ struct be_rx_page_info *page_info_tbl; ++ struct net_lro_mgr lro_mgr; ++ struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS]; ++ struct be_eq_obj rx_eq; ++ struct be_rx_stats stats; ++ u8 rss_id; ++ bool rx_post_starved; /* Zero rx frags have been posted to BE */ ++ u16 prev_frag_idx; ++ u32 cache_line_barrier[16]; + }; + +-#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */ ++struct be_drv_stats { ++ u32 be_on_die_temperature; ++ u32 be_tx_events; ++ u32 eth_red_drops; ++ u32 rx_drops_no_pbuf; ++ u32 rx_drops_no_txpb; ++ u32 rx_drops_no_erx_descr; ++ u32 rx_drops_no_tpre_descr; ++ u32 rx_drops_too_many_frags; ++ u32 rx_drops_invalid_ring; ++ u32 forwarded_packets; ++ u32 rx_drops_mtu; ++ u32 rx_crc_errors; ++ u32 rx_alignment_symbol_errors; ++ u32 rx_pause_frames; ++ u32 rx_priority_pause_frames; ++ u32 rx_control_frames; ++ u32 rx_in_range_errors; ++ u32 rx_out_range_errors; ++ u32 rx_frame_too_long; ++ u32 rx_address_match_errors; ++ u32 rx_dropped_too_small; ++ u32 rx_dropped_too_short; ++ u32 rx_dropped_header_too_small; ++ u32 rx_dropped_tcp_length; ++ u32 rx_dropped_runt; ++ u32 rx_ip_checksum_errs; ++ u32 rx_tcp_checksum_errs; ++ u32 rx_udp_checksum_errs; ++ u32 rx_switched_unicast_packets; ++ u32 rx_switched_multicast_packets; ++ u32 rx_switched_broadcast_packets; ++ u32 tx_pauseframes; ++ u32 tx_priority_pauseframes; ++ u32 tx_controlframes; ++ u32 rxpp_fifo_overflow_drop; ++ u32 rx_input_fifo_overflow_drop; ++ u32 pmem_fifo_overflow_drop; ++ u32 jabber_events; ++}; ++ ++struct be_vf_cfg { ++ unsigned char vf_mac_addr[ETH_ALEN]; ++ u32 vf_if_handle; ++ u32 vf_pmac_id; ++ u16 vf_def_vid; ++ u16 vf_vlan_tag; ++ u32 vf_tx_rate; ++}; ++ ++#define BE_INVALID_PMAC_ID 0xffffffff ++#define BE_FLAGS_DCBX (1 << 16) ++ + struct be_adapter { + struct pci_dev *pdev; + struct net_device *netdev; +@@ -228,7 +342,7 @@ struct be_adapter { + u8 __iomem *db; /* Door Bell */ + u8 __iomem *pcicfg; /* PCI config space */ + +- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */ ++ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ + struct be_dma_mem mbox_mem; + /* Mbox mem is adjusted to align to 16 bytes. 
The allocated addr + * is stored for freeing purpose */ +@@ -238,66 +352,121 @@ struct be_adapter { + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ + spinlock_t mcc_cq_lock; + +- struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS]; +- bool msix_enabled; ++ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS]; ++ u32 num_msix_vec; + bool isr_registered; + + /* TX Rings */ + struct be_eq_obj tx_eq; +- struct be_tx_obj tx_obj; ++ struct be_tx_obj tx_obj[MAX_TX_QS]; ++ u8 num_tx_qs; ++ u8 prio_tc_map[MAX_TX_QS]; /* prio_tc_map[prio] => tc-id */ ++ u8 tc_txq_map[MAX_TX_QS]; /* tc_txq_map[tc-id] => txq index */ + + u32 cache_line_break[8]; + + /* Rx rings */ +- struct be_eq_obj rx_eq; +- struct be_rx_obj rx_obj; ++ struct be_rx_obj rx_obj[MAX_RX_QS]; /* one default non-rss Q */ ++ u32 num_rx_qs; ++ ++ struct be_dma_mem stats_cmd; ++ struct net_device_stats net_stats; ++ struct be_drv_stats drv_stats; + u32 big_page_size; /* Compounded page size shared by rx wrbs */ +- bool rx_post_starved; /* Zero rx frags have been posted to BE */ + + struct vlan_group *vlan_grp; +- u16 num_vlans; ++ u16 vlans_added; ++ u16 max_vlans; /* Number of vlans supported */ + u8 vlan_tag[VLAN_GROUP_ARRAY_LEN]; ++ u8 vlan_prio_bmap; /* Available priority BitMap */ ++ u16 recommended_prio; /* Recommended Priority */ ++ struct be_dma_mem rx_filter; + +- struct be_stats_obj stats; + /* Work queue used to perform periodic tasks like getting statistics */ + struct delayed_work work; ++ u16 work_counter; + +- /* Ethtool knobs and info */ +- bool rx_csum; /* BE card must perform rx-checksumming */ ++ u32 flags; ++ bool rx_csum; /* BE card must perform rx-checksumming */ ++ u32 max_rx_coal; + char fw_ver[FW_VER_LEN]; + u32 if_handle; /* Used to configure filtering */ + u32 pmac_id; /* MAC addr handle used by BE card */ ++ u32 beacon_state; /* for set_phys_id */ + +- bool link_up; ++ bool eeh_err; ++ int link_status; + u32 port_num; ++ u32 hba_port_num; + bool promiscuous; +- u32 cap; ++ bool wol; ++ u32 function_mode; ++ u32 function_caps; + u32 rx_fc; /* Rx flow control */ + u32 tx_fc; /* Tx flow control */ ++ bool ue_detected; ++ bool stats_cmd_sent; ++ bool gro_supported; ++ int link_speed; ++ u8 port_type; ++ u8 transceiver; ++ u8 autoneg; + u8 generation; /* BladeEngine ASIC generation */ ++ u32 flash_status; ++ struct completion flash_compl; ++ ++ u8 eq_next_idx; ++ bool be3_native; ++ u16 num_vfs; ++ struct be_vf_cfg *vf_cfg; ++ u8 is_virtfn; ++ u16 pvid; ++ u32 sli_family; ++ u8 port_name[4]; ++ char model_number[32]; + }; + + /* BladeEngine Generation numbers */ + #define BE_GEN2 2 + #define BE_GEN3 3 + +-extern const struct ethtool_ops be_ethtool_ops; ++#define ON 1 ++#define OFF 0 ++#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3) ++#define lancer_A0_chip(adapter) \ ++ (adapter->sli_family == LANCER_A0_SLI_FAMILY) + +-#define drvr_stats(adapter) (&adapter->stats.drvr_stats) ++extern struct ethtool_ops be_ethtool_ops; + +-static inline unsigned int be_pci_func(struct be_adapter *adapter) +-{ +- return PCI_FUNC(adapter->pdev->devfn); +-} ++#define msix_enabled(adapter) (adapter->num_msix_vec > 0) ++#define tx_stats(txo) (&txo->stats) ++#define rx_stats(rxo) (&rxo->stats) + ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) ++#define BE_SET_NETDEV_OPS(netdev, ops) be_netdev_ops_init(netdev, ops) ++#else + #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) ++#endif ++ ++#define for_all_rx_queues(adapter, rxo, i) \ ++ for (i = 0, rxo = &adapter->rx_obj[i]; i < 
adapter->num_rx_qs; \ ++ i++, rxo++) ++ ++/* Just skip the first default non-rss queue */ ++#define for_all_rss_queues(adapter, rxo, i) \ ++ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\ ++ i++, rxo++) ++ ++#define for_all_tx_queues(adapter, txo, i) \ ++ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \ ++ i++, txo++) + + #define PAGE_SHIFT_4K 12 + #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) + + /* Returns number of pages spanned by the data starting at the given addr */ +-#define PAGES_4K_SPANNED(_address, size) \ +- ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ ++#define PAGES_4K_SPANNED(_address, size) \ ++ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ + (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) + + /* Byte offset into the page corresponding to given address */ +@@ -305,7 +474,7 @@ static inline unsigned int be_pci_func(struct be_adapter *adapter) + ((size_t)(addr) & (PAGE_SIZE_4K-1)) + + /* Returns bit offset within a DWORD of a bitfield */ +-#define AMAP_BIT_OFFSET(_struct, field) \ ++#define AMAP_BIT_OFFSET(_struct, field) \ + (((size_t)&(((_struct *)0)->field))%32) + + /* Returns the bit mask of the field that is NOT shifted into location. */ +@@ -356,6 +525,11 @@ static inline void swap_dws(void *wrb, int len) + #endif /* __BIG_ENDIAN */ + } + ++static inline bool vlan_configured(struct be_adapter *adapter) ++{ ++ return adapter->vlan_grp && adapter->vlans_added; ++} ++ + static inline u8 is_tcp_pkt(struct sk_buff *skb) + { + u8 val = 0; +@@ -380,9 +554,65 @@ static inline u8 is_udp_pkt(struct sk_buff *skb) + return val; + } + ++static inline u8 is_ipv6_ext_hdr(struct sk_buff *skb) ++{ ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ if (ip_hdr(skb)->version == 6) ++ return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr); ++ else ++#endif ++ return 0; ++} ++ ++static inline void be_check_sriov_fn_type(struct be_adapter *adapter) ++{ ++ u32 sli_intf; ++ ++ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); ++ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 
1 : 0; ++} ++ ++static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) ++{ ++ u32 addr; ++ ++ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0); ++ ++ mac[5] = (u8)(addr & 0xFF); ++ mac[4] = (u8)((addr >> 8) & 0xFF); ++ mac[3] = (u8)((addr >> 16) & 0xFF); ++ /* Use the OUI programmed in hardware */ ++ memcpy(mac, adapter->netdev->dev_addr, 3); ++} ++ ++static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, ++ struct sk_buff *skb) ++{ ++ u8 vlan_prio = 0; ++ u16 vlan_tag = 0; ++ ++ vlan_tag = vlan_tx_tag_get(skb); ++ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; ++ /* If vlan priority provided by OS is NOT in available bmap */ ++ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) ++ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | ++ adapter->recommended_prio; ++ ++ return vlan_tag; ++} ++ ++#define be_physfn(adapter) (!adapter->is_virtfn) ++ + extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, + u16 num_popped); +-extern void be_link_status_update(struct be_adapter *adapter, bool link_up); ++extern void be_link_status_update(struct be_adapter *adapter, int link_status); + extern void netdev_stats_update(struct be_adapter *adapter); ++extern void be_parse_stats(struct be_adapter *adapter); + extern int be_load_fw(struct be_adapter *adapter, u8 *func); ++ ++#ifdef CONFIG_PALAU ++extern void be_sysfs_create_group(struct be_adapter *adapter); ++extern void be_sysfs_remove_group(struct be_adapter *adapter); ++#endif ++ + #endif /* BE_H */ +diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c +index 28a0eda..b4ca89c 100644 +--- a/drivers/net/benet/be_cmds.c ++++ b/drivers/net/benet/be_cmds.c +@@ -1,30 +1,45 @@ + /* +- * Copyright (C) 2005 - 2009 ServerEngines ++ * Copyright (C) 2005 - 2011 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation. The full GNU General ++ * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: +- * linux-drivers@serverengines.com ++ * linux-drivers@emulex.com + * +- * ServerEngines +- * 209 N. Fair Oaks Ave +- * Sunnyvale, CA 94085 ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 + */ + + #include "be.h" + #include "be_cmds.h" + ++/* Must be a power of 2 or else MODULO will BUG_ON */ ++static int be_get_temp_freq = 64; ++ ++static inline void *embedded_payload(struct be_mcc_wrb *wrb) ++{ ++ return wrb->payload.embedded_payload; ++} ++ + static void be_mcc_notify(struct be_adapter *adapter) + { + struct be_queue_info *mccq = &adapter->mcc_obj.q; + u32 val = 0; + ++ if (adapter->eeh_err) { ++ dev_info(&adapter->pdev->dev, "Error in Card Detected! 
Cannot issue commands\n"); ++ return; ++ } ++ + val |= mccq->id & DB_MCCQ_RING_ID_MASK; + val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; ++ ++ wmb(); + iowrite32(val, adapter->db + DB_MCCQ_OFFSET); + } + +@@ -59,21 +74,67 @@ static int be_mcc_compl_process(struct be_adapter *adapter, + + compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & + CQE_STATUS_COMPL_MASK; ++ ++ if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) && ++ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { ++ adapter->flash_status = compl_status; ++ complete(&adapter->flash_compl); ++ } ++ + if (compl_status == MCC_STATUS_SUCCESS) { +- if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { +- struct be_cmd_resp_get_stats *resp = +- adapter->stats.cmd.va; +- be_dws_le_to_cpu(&resp->hw_stats, +- sizeof(resp->hw_stats)); ++ if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) && ++ (compl->tag1 == CMD_SUBSYSTEM_ETH)) { ++ if (adapter->generation == BE_GEN3) { ++ struct be_cmd_resp_get_stats_v1 *resp = ++ adapter->stats_cmd.va; ++ ++ be_dws_le_to_cpu(&resp->hw_stats, ++ sizeof(resp->hw_stats)); ++ } else { ++ struct be_cmd_resp_get_stats_v0 *resp = ++ adapter->stats_cmd.va; ++ ++ be_dws_le_to_cpu(&resp->hw_stats, ++ sizeof(resp->hw_stats)); ++ } ++ be_parse_stats(adapter); + netdev_stats_update(adapter); ++ adapter->stats_cmd_sent = false; ++ } ++ if (compl->tag0 == ++ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) { ++ struct be_mcc_wrb *mcc_wrb = ++ queue_index_node(&adapter->mcc_obj.q, ++ compl->tag1); ++ struct be_cmd_resp_get_cntl_addnl_attribs *resp = ++ embedded_payload(mcc_wrb); ++ adapter->drv_stats.be_on_die_temperature = ++ resp->on_die_temperature; ++ } ++ } else { ++ if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) ++ be_get_temp_freq = 0; ++ ++ if (compl->tag1 == MCC_WRB_PASS_THRU) ++ goto done; ++ ++ if (compl_status == MCC_STATUS_NOT_SUPPORTED || ++ compl_status == MCC_STATUS_ILLEGAL_REQUEST) ++ goto done; ++ ++ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { ++ dev_warn(&adapter->pdev->dev, "This domain(VM) is not " ++ "permitted to execute this cmd (opcode %d)\n", ++ compl->tag0); ++ } else { ++ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & ++ CQE_STATUS_EXTD_MASK; ++ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" ++ "status %d, extd-status %d\n", ++ compl->tag0, compl_status, extd_status); + } +- } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) { +- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & +- CQE_STATUS_EXTD_MASK; +- dev_warn(&adapter->pdev->dev, +- "Error in cmd completion: status(compl/extd)=%d/%d\n", +- compl_status, extd_status); + } ++done: + return compl_status; + } + +@@ -82,7 +143,70 @@ static void be_async_link_state_process(struct be_adapter *adapter, + struct be_async_event_link_state *evt) + { + be_link_status_update(adapter, +- evt->port_link_status == ASYNC_EVENT_LINK_UP); ++ ((evt->port_link_status & ~ASYNC_EVENT_LOGICAL) == ++ ASYNC_EVENT_LINK_UP ? 
LINK_UP : LINK_DOWN)); ++} ++ ++/* Grp5 CoS Priority evt */ ++static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, ++ struct be_async_event_grp5_cos_priority *evt) ++{ ++ if (evt->valid) { ++ adapter->vlan_prio_bmap = evt->available_priority_bmap; ++ adapter->recommended_prio &= ~VLAN_PRIO_MASK; ++ adapter->recommended_prio = ++ evt->reco_default_priority << VLAN_PRIO_SHIFT; ++ } ++} ++ ++/* Grp5 QOS Speed evt */ ++static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, ++ struct be_async_event_grp5_qos_link_speed *evt) ++{ ++ if (evt->physical_port == adapter->hba_port_num) { ++ /* qos_link_speed is in units of 10 Mbps */ ++ adapter->link_speed = evt->qos_link_speed * 10; ++ } ++} ++ ++/*Grp5 PVID evt*/ ++static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, ++ struct be_async_event_grp5_pvid_state *evt) ++{ ++ if (evt->enabled) ++ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK ; ++ else ++ adapter->pvid = 0; ++} ++ ++static void be_async_grp5_evt_process(struct be_adapter *adapter, ++ u32 trailer, struct be_mcc_compl *evt) ++{ ++ u8 event_type = 0; ++ ++ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & ++ ASYNC_TRAILER_EVENT_TYPE_MASK; ++ ++ switch (event_type) { ++ case ASYNC_EVENT_COS_PRIORITY: ++ be_async_grp5_cos_priority_process(adapter, ++ (struct be_async_event_grp5_cos_priority *)evt); ++ break; ++ case ASYNC_EVENT_QOS_SPEED: ++ be_async_grp5_qos_speed_process(adapter, ++ (struct be_async_event_grp5_qos_link_speed *)evt); ++ break; ++ case ASYNC_EVENT_PVID_STATE: ++ be_async_grp5_pvid_state_process(adapter, ++ (struct be_async_event_grp5_pvid_state *)evt); ++ break; ++ case GRP5_TYPE_PRIO_TC_MAP: ++ memcpy(adapter->prio_tc_map, evt, MAX_TX_QS); ++ break; ++ default: ++ printk(KERN_WARNING "Unknown grp5 event!\n"); ++ break; ++ } + } + + static inline bool is_link_state_evt(u32 trailer) +@@ -92,6 +216,13 @@ static inline bool is_link_state_evt(u32 trailer) + ASYNC_EVENT_CODE_LINK_STATE); + } + ++static inline bool is_grp5_evt(u32 trailer) ++{ ++ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & ++ ASYNC_TRAILER_EVENT_CODE_MASK) == ++ ASYNC_EVENT_CODE_GRP_5); ++} ++ + static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) + { + struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; +@@ -104,46 +235,67 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) + return NULL; + } + +-int be_process_mcc(struct be_adapter *adapter) ++void be_async_mcc_enable(struct be_adapter *adapter) ++{ ++ spin_lock_bh(&adapter->mcc_cq_lock); ++ ++ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); ++ adapter->mcc_obj.rearm_cq = true; ++ ++ spin_unlock_bh(&adapter->mcc_cq_lock); ++} ++ ++void be_async_mcc_disable(struct be_adapter *adapter) ++{ ++ adapter->mcc_obj.rearm_cq = false; ++} ++ ++int be_process_mcc(struct be_adapter *adapter, int *status) + { + struct be_mcc_compl *compl; +- int num = 0, status = 0; ++ int num = 0; ++ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + + spin_lock_bh(&adapter->mcc_cq_lock); + while ((compl = be_mcc_compl_get(adapter))) { + if (compl->flags & CQE_FLAGS_ASYNC_MASK) { + /* Interpret flags as an async trailer */ +- BUG_ON(!is_link_state_evt(compl->flags)); +- +- /* Interpret compl as a async link evt */ +- be_async_link_state_process(adapter, ++ if (is_link_state_evt(compl->flags)) ++ be_async_link_state_process(adapter, + (struct be_async_event_link_state *) compl); ++ else if (is_grp5_evt(compl->flags)) ++ be_async_grp5_evt_process(adapter, ++ 
compl->flags, compl); ++ + } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { +- status = be_mcc_compl_process(adapter, compl); +- atomic_dec(&adapter->mcc_obj.q.used); ++ *status = be_mcc_compl_process(adapter, compl); ++ atomic_dec(&mcc_obj->q.used); + } + be_mcc_compl_use(compl); + num++; + } + +- if (num) +- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num); +- + spin_unlock_bh(&adapter->mcc_cq_lock); +- return status; ++ return num; + } + + /* Wait till no more pending mcc requests are present */ + static int be_mcc_wait_compl(struct be_adapter *adapter) + { + #define mcc_timeout 120000 /* 12s timeout */ +- int i, status; ++ int i, num, status = 0; ++ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; ++ ++ if (adapter->eeh_err) ++ return -EIO; ++ + for (i = 0; i < mcc_timeout; i++) { +- status = be_process_mcc(adapter); +- if (status) +- return status; ++ num = be_process_mcc(adapter, &status); ++ if (num) ++ be_cq_notify(adapter, mcc_obj->cq.id, ++ mcc_obj->rearm_cq, num); + +- if (atomic_read(&adapter->mcc_obj.q.used) == 0) ++ if (atomic_read(&mcc_obj->q.used) == 0) + break; + udelay(100); + } +@@ -151,7 +303,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) + dev_err(&adapter->pdev->dev, "mccq poll timed out\n"); + return -1; + } +- return 0; ++ return status; + } + + /* Notify MCC requests and wait for completion */ +@@ -163,23 +315,34 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) + + static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) + { +- int cnt = 0, wait = 5; ++ int msecs = 0; + u32 ready; + ++ if (adapter->eeh_err) { ++ dev_err(&adapter->pdev->dev, "Error detected in card.Cannot issue commands\n"); ++ return -EIO; ++ } + do { +- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; ++ ready = ioread32(db); ++ if (ready == 0xffffffff) { ++ dev_err(&adapter->pdev->dev, ++ "pci slot disconnected\n"); ++ return -1; ++ } ++ ++ ready &= MPU_MAILBOX_DB_RDY_MASK; + if (ready) + break; + +- if (cnt > 4000000) { ++ if (msecs > 4000) { + dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); ++ be_detect_dump_ue(adapter); + return -1; + } + +- if (cnt > 50) +- wait = 200; +- cnt += wait; +- udelay(wait); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout(msecs_to_jiffies(1)); ++ msecs++; + } while (true); + + return 0; +@@ -198,6 +361,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) + struct be_mcc_mailbox *mbox = mbox_mem->va; + struct be_mcc_compl *compl = &mbox->compl; + ++ /* wait for ready to be set */ ++ status = be_mbox_db_ready_wait(adapter, db); ++ if (status != 0) ++ return status; ++ + val |= MPU_MAILBOX_DB_HI_MASK; + /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ + val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; +@@ -232,7 +400,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) + + static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) + { +- u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); ++ u32 sem; ++ ++ if (lancer_chip(adapter)) ++ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); ++ else ++ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); + + *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; + if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) +@@ -245,30 +418,29 @@ int be_cmd_POST(struct be_adapter *adapter) + { + u16 stage; + int status, timeout = 0; ++ struct device *dev = &adapter->pdev->dev; + + do { + status = be_POST_stage_get(adapter, &stage); + if (status) { +- dev_err(&adapter->pdev->dev, 
"POST error; stage=0x%x\n", +- stage); ++ dev_err(dev, "POST error; stage=0x%x\n", stage); + return -1; + } else if (stage != POST_STAGE_ARMFW_RDY) { + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(2 * HZ); ++ if (schedule_timeout(2 * HZ)) { ++ dev_err(dev, "POST cmd aborted\n"); ++ return -EINTR; ++ } + timeout += 2; + } else { + return 0; + } +- } while (timeout < 20); ++ } while (timeout < 40); + +- dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); ++ dev_err(dev, "POST timeout; stage=0x%x\n", stage); + return -1; + } + +-static inline void *embedded_payload(struct be_mcc_wrb *wrb) +-{ +- return wrb->payload.embedded_payload; +-} + + static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) + { +@@ -277,7 +449,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) + + /* Don't touch the hdr after it's prepared */ + static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, +- bool embedded, u8 sge_cnt) ++ bool embedded, u8 sge_cnt, u32 opcode) + { + if (embedded) + wrb->embedded |= MCC_WRB_EMBEDDED_MASK; +@@ -285,7 +457,8 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, + wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << + MCC_WRB_SGE_CNT_SHIFT; + wrb->payload_length = payload_len; +- be_dws_cpu_to_le(wrb, 20); ++ wrb->tag0 = opcode; ++ be_dws_cpu_to_le(wrb, 8); + } + + /* Don't touch the hdr after it's prepared */ +@@ -295,6 +468,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, + req_hdr->opcode = opcode; + req_hdr->subsystem = subsystem; + req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); ++ req_hdr->version = 0; + } + + static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, +@@ -349,7 +523,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) + struct be_queue_info *mccq = &adapter->mcc_obj.q; + struct be_mcc_wrb *wrb; + +- BUG_ON(atomic_read(&mccq->used) >= mccq->len); ++ if (atomic_read(&mccq->used) >= mccq->len) { ++ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); ++ return NULL; ++ } ++ + wrb = queue_head_node(mccq); + queue_head_inc(mccq); + atomic_inc(&mccq->used); +@@ -357,6 +535,59 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) + return wrb; + } + ++/* Tell fw we're about to start firing cmds by writing a ++ * special pattern across the wrb hdr; uses mbox ++ */ ++int be_cmd_fw_init(struct be_adapter *adapter) ++{ ++ u8 *wrb; ++ int status; ++ ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; ++ ++ wrb = (u8 *)wrb_from_mbox(adapter); ++ *wrb++ = 0xFF; ++ *wrb++ = 0x12; ++ *wrb++ = 0x34; ++ *wrb++ = 0xFF; ++ *wrb++ = 0xFF; ++ *wrb++ = 0x56; ++ *wrb++ = 0x78; ++ *wrb = 0xFF; ++ ++ status = be_mbox_notify_wait(adapter); ++ ++ mutex_unlock(&adapter->mbox_lock); ++ return status; ++} ++ ++/* Tell fw we're done with firing cmds by writing a ++ * special pattern across the wrb hdr; uses mbox ++ */ ++int be_cmd_fw_clean(struct be_adapter *adapter) ++{ ++ u8 *wrb; ++ int status; ++ ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; ++ ++ wrb = (u8 *)wrb_from_mbox(adapter); ++ *wrb++ = 0xFF; ++ *wrb++ = 0xAA; ++ *wrb++ = 0xBB; ++ *wrb++ = 0xFF; ++ *wrb++ = 0xFF; ++ *wrb++ = 0xCC; ++ *wrb++ = 0xDD; ++ *wrb = 0xFF; ++ ++ status = be_mbox_notify_wait(adapter); ++ ++ mutex_unlock(&adapter->mbox_lock); ++ return status; ++} + int be_cmd_eq_create(struct be_adapter *adapter, + struct be_queue_info *eq, int eq_delay) + { +@@ -365,20 +596,19 @@ int 
be_cmd_eq_create(struct be_adapter *adapter, + struct be_dma_mem *q_mem = &eq->dma_mem; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_EQ_CREATE, sizeof(*req)); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + +- AMAP_SET_BITS(struct amap_eq_context, func, req->context, +- be_pci_func(adapter)); + AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); + /* 4byte eqe*/ + AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); +@@ -397,7 +627,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, + eq->created = true; + } + +- spin_unlock(&adapter->mbox_lock); ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + +@@ -409,12 +639,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, + struct be_cmd_req_mac_query *req; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_NTWK_MAC_QUERY); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req)); +@@ -433,13 +665,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, + memcpy(mac_addr, resp->mac.addr, ETH_ALEN); + } + +- spin_unlock(&adapter->mbox_lock); ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + + /* Uses synchronous MCCQ */ + int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, +- u32 if_id, u32 *pmac_id) ++ u32 if_id, u32 *pmac_id, u32 domain) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_pmac_add *req; +@@ -448,13 +680,19 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_NTWK_PMAC_ADD); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); + ++ req->hdr.domain = domain; + req->if_id = cpu_to_le32(if_id); + memcpy(req->mac_address, mac_addr, ETH_ALEN); + +@@ -464,12 +702,13 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, + *pmac_id = le32_to_cpu(resp->pmac_id); + } + ++err: + spin_unlock_bh(&adapter->mcc_lock); + return status; + } + + /* Uses synchronous MCCQ */ +-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) ++int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_pmac_del *req; +@@ -478,20 +717,26 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_NTWK_PMAC_DEL); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_PMAC_DEL, 
sizeof(*req)); + ++ req->hdr.domain = dom; + req->if_id = cpu_to_le32(if_id); + req->pmac_id = cpu_to_le32(pmac_id); + + status = be_mcc_notify_wait(adapter); + ++err: + spin_unlock_bh(&adapter->mcc_lock); +- + return status; + } + +@@ -506,29 +751,51 @@ int be_cmd_cq_create(struct be_adapter *adapter, + void *ctxt; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + ctxt = &req->context; + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_CQ_CREATE); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_CQ_CREATE, sizeof(*req)); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + +- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); +- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); +- AMAP_SET_BITS(struct amap_cq_context, count, ctxt, +- __ilog2_u32(cq->len/256)); +- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); +- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); +- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); +- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); +- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); +- AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter)); ++ if (lancer_chip(adapter)) { ++ req->hdr.version = 2; ++ req->page_size = 1; /* 1 for 4K */ ++ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt, ++ coalesce_wm); ++ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, ++ no_delay); ++ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, ++ __ilog2_u32(cq->len/256)); ++ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1); ++ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable, ++ ctxt, 1); ++ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, ++ ctxt, eq->id); ++ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1); ++ } else { ++ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, ++ coalesce_wm); ++ AMAP_SET_BITS(struct amap_cq_context_be, nodelay, ++ ctxt, no_delay); ++ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, ++ __ilog2_u32(cq->len/256)); ++ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); ++ AMAP_SET_BITS(struct amap_cq_context_be, solevent, ++ ctxt, sol_evts); ++ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); ++ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); ++ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1); ++ } ++ + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); +@@ -540,8 +807,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, + cq->created = true; + } + +- spin_unlock(&adapter->mbox_lock); +- ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + +@@ -553,7 +819,68 @@ static u32 be_encoded_q_len(int q_len) + return len_encoded; + } + +-int be_cmd_mccq_create(struct be_adapter *adapter, ++int be_cmd_mccq_ext_create(struct be_adapter *adapter, ++ struct be_queue_info *mccq, ++ struct be_queue_info *cq) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_mcc_ext_create *req; ++ struct be_dma_mem *q_mem = &mccq->dma_mem; ++ void *ctxt; ++ int status; ++ ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; ++ ++ wrb = wrb_from_mbox(adapter); ++ req = embedded_payload(wrb); ++ ctxt = &req->context; ++ ++ 
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_MCC_CREATE_EXT); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); ++ ++ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); ++ if (lancer_chip(adapter)) { ++ req->hdr.version = 1; ++ req->cq_id = cpu_to_le16(cq->id); ++ ++ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, ++ be_encoded_q_len(mccq->len)); ++ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); ++ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, ++ ctxt, cq->id); ++ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, ++ ctxt, 1); ++ ++ } else { ++ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); ++ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, ++ be_encoded_q_len(mccq->len)); ++ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); ++ } ++ ++ /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ ++ req->async_event_bitmap[0] |= cpu_to_le32(0x00000022); ++ ++ be_dws_cpu_to_le(ctxt, sizeof(req->context)); ++ ++ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); ++ ++ status = be_mbox_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); ++ mccq->id = le16_to_cpu(resp->id); ++ mccq->created = true; ++ } ++ ++ mutex_unlock(&adapter->mbox_lock); ++ return status; ++} ++ ++int be_cmd_mccq_org_create(struct be_adapter *adapter, + struct be_queue_info *mccq, + struct be_queue_info *cq) + { +@@ -563,24 +890,25 @@ int be_cmd_mccq_create(struct be_adapter *adapter, + void *ctxt; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + ctxt = &req->context; + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_MCC_CREATE); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MCC_CREATE, sizeof(*req)); + +- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); ++ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + +- AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter)); +- AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); +- AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, +- be_encoded_q_len(mccq->len)); +- AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); ++ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); ++ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, ++ be_encoded_q_len(mccq->len)); ++ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + +@@ -592,75 +920,93 @@ int be_cmd_mccq_create(struct be_adapter *adapter, + mccq->id = le16_to_cpu(resp->id); + mccq->created = true; + } +- spin_unlock(&adapter->mbox_lock); + ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + +-int be_cmd_txq_create(struct be_adapter *adapter, +- struct be_queue_info *txq, ++int be_cmd_mccq_create(struct be_adapter *adapter, ++ struct be_queue_info *mccq, + struct be_queue_info *cq) + { ++ int status; ++ ++ status = be_cmd_mccq_ext_create(adapter, mccq, cq); ++ if (status && !lancer_chip(adapter)) { ++ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " ++ "or newer to avoid conflicting priorities between NIC " ++ "and FCoE traffic"); ++ status = be_cmd_mccq_org_create(adapter, mccq, 
cq); ++ } ++ return status; ++} ++ ++int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq, ++ struct be_queue_info *cq, u8 *tc_id) ++{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_eth_tx_create *req; + struct be_dma_mem *q_mem = &txq->dma_mem; +- void *ctxt; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); +- ctxt = &req->context; +- +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_TX_CREATE); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, + sizeof(*req)); + +- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); ++ if (adapter->flags & BE_FLAGS_DCBX || lancer_chip(adapter)) { ++ req->hdr.version = 1; ++ req->if_id = cpu_to_le16(adapter->if_handle); ++ } ++ if (adapter->flags & BE_FLAGS_DCBX) ++ req->type = cpu_to_le16(ETX_QUEUE_TYPE_PRIORITY); ++ else ++ req->type = cpu_to_le16(ETX_QUEUE_TYPE_STANDARD); + req->ulp_num = BE_ULP1_NUM; +- req->type = BE_ETH_TX_RING_TYPE_STANDARD; +- +- AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, +- be_encoded_q_len(txq->len)); +- AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt, +- be_pci_func(adapter)); +- AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); +- AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); +- +- be_dws_cpu_to_le(ctxt, sizeof(req->context)); +- ++ req->cq_id = cpu_to_le16(cq->id); ++ req->queue_size = be_encoded_q_len(txq->len); ++ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); + txq->id = le16_to_cpu(resp->cid); ++ if (adapter->flags & BE_FLAGS_DCBX) ++ *tc_id = resp->tc_id; + txq->created = true; + } + +- spin_unlock(&adapter->mbox_lock); +- ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + +-/* Uses mbox */ ++/* Uses MCC */ + int be_cmd_rxq_create(struct be_adapter *adapter, + struct be_queue_info *rxq, u16 cq_id, u16 frag_size, +- u16 max_frame_size, u32 if_id, u32 rss) ++ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_eth_rx_create *req; + struct be_dma_mem *q_mem = &rxq->dma_mem; + int status; + +- spin_lock(&adapter->mbox_lock); ++ spin_lock_bh(&adapter->mcc_lock); + +- wrb = wrb_from_mbox(adapter); ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_ETH_RX_CREATE); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE, + sizeof(*req)); +@@ -673,15 +1019,16 @@ int be_cmd_rxq_create(struct be_adapter *adapter, + req->max_frame_size = cpu_to_le16(max_frame_size); + req->rss_queue = cpu_to_le32(rss); + +- status = be_mbox_notify_wait(adapter); ++ status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); + rxq->id = le16_to_cpu(resp->id); + rxq->created = true; ++ *rss_id = resp->rss_id; + } + +- spin_unlock(&adapter->mbox_lock); +- ++err: ++ spin_unlock_bh(&adapter->mcc_lock); + return status; + } + +@@ -696,13 +1043,12 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, + u8 subsys = 0, 
opcode = 0; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); +- + switch (queue_type) { + case QTYPE_EQ: + subsys = CMD_SUBSYSTEM_COMMON; +@@ -727,13 +1073,47 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, + default: + BUG(); + } ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode); ++ + be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); + req->id = cpu_to_le16(q->id); + + status = be_mbox_notify_wait(adapter); ++ if (!status) ++ q->created = false; + +- spin_unlock(&adapter->mbox_lock); ++ mutex_unlock(&adapter->mbox_lock); ++ return status; ++} + ++/* Uses MCC */ ++int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_q_destroy *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY); ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY, ++ sizeof(*req)); ++ req->id = cpu_to_le16(q->id); ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) ++ q->created = false; ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); + return status; + } + +@@ -741,22 +1121,26 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, + * Uses mbox + */ + int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, +- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id) ++ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id, ++ u32 domain) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_if_create *req; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_NTWK_INTERFACE_CREATE); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); + ++ req->hdr.domain = domain; + req->capability_flags = cpu_to_le32(cap_flags); + req->enable_flags = cpu_to_le32(en_flags); + req->pmac_invalid = pmac_invalid; +@@ -771,33 +1155,35 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, + *pmac_id = le32_to_cpu(resp->pmac_id); + } + +- spin_unlock(&adapter->mbox_lock); ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + + /* Uses mbox */ +-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) ++int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_if_destroy *req; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_NTWK_INTERFACE_DESTROY); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); + ++ req->hdr.domain = domain; + req->interface_id = cpu_to_le32(interface_id); + + status = be_mbox_notify_wait(adapter); + +- 
spin_unlock(&adapter->mbox_lock); +- ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + +@@ -808,33 +1194,48 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) + int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) + { + struct be_mcc_wrb *wrb; +- struct be_cmd_req_get_stats *req; ++ struct be_cmd_req_hdr *hdr; + struct be_sge *sge; ++ int status = 0; ++ ++ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0) ++ be_cmd_get_die_temperature(adapter); + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); +- req = nonemb_cmd->va; ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ hdr = nonemb_cmd->va; + sge = nonembedded_sgl(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); +- wrb->tag0 = OPCODE_ETH_GET_STATISTICS; ++ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1, ++ OPCODE_ETH_GET_STATISTICS); + +- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, +- OPCODE_ETH_GET_STATISTICS, sizeof(*req)); ++ be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, ++ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size); ++ ++ if (adapter->generation == BE_GEN3) ++ hdr->version = 1; ++ ++ wrb->tag1 = CMD_SUBSYSTEM_ETH; + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); + sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(nonemb_cmd->size); + + be_mcc_notify(adapter); ++ adapter->stats_cmd_sent = true; + ++err: + spin_unlock_bh(&adapter->mcc_lock); +- return 0; ++ return status; + } + + /* Uses synchronous mcc */ + int be_cmd_link_status_query(struct be_adapter *adapter, +- bool *link_up) ++ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_link_status *req; +@@ -843,50 +1244,216 @@ int be_cmd_link_status_query(struct be_adapter *adapter, + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- *link_up = false; ++ *link_status = LINK_DOWN; + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req)); + ++ req->hdr.domain = dom; ++ + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_link_status *resp = embedded_payload(wrb); +- if (resp->mac_speed != PHY_LINK_SPEED_ZERO) +- *link_up = true; ++ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { ++ *link_status = LINK_UP; ++ *link_speed = le16_to_cpu(resp->link_speed); ++ *mac_speed = resp->mac_speed; ++ } + } + ++err: + spin_unlock_bh(&adapter->mcc_lock); + return status; + } + +-/* Uses Mbox */ +-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) ++/* Uses synchronous mcc */ ++int be_cmd_get_die_temperature(struct be_adapter *adapter) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_get_cntl_addnl_attribs *req; ++ u16 mccq_index; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ mccq_index = adapter->mcc_obj.q.head; ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req)); ++ ++ wrb->tag1 = mccq_index; ++ ++ be_mcc_notify(adapter); ++ ++err: ++ 
spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++ ++/* Uses synchronous mcc */ ++int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_get_fat *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_MANAGE_FAT); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_MANAGE_FAT, sizeof(*req)); ++ req->fat_operation = cpu_to_le32(QUERY_FAT); ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); ++ if (log_size && resp->log_size) ++ *log_size = le32_to_cpu(resp->log_size) - ++ sizeof(u32); ++ } ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) ++{ ++ struct be_dma_mem get_fat_cmd; ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_get_fat *req; ++ struct be_sge *sge; ++ u32 offset = 0, total_size, buf_size, ++ log_offset = sizeof(u32), payload_len; ++ int status; ++ ++ if (buf_len == 0) ++ return; ++ ++ total_size = buf_len; ++ ++ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; ++ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, ++ get_fat_cmd.size, ++ &get_fat_cmd.dma); ++ if (!get_fat_cmd.va) { ++ status = -ENOMEM; ++ dev_err(&adapter->pdev->dev, ++ "Memory allocation failure while retrieving FAT data\n"); ++ return; ++ } ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ while (total_size) { ++ buf_size = min(total_size, (u32)60*1024); ++ total_size -= buf_size; ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = get_fat_cmd.va; ++ sge = nonembedded_sgl(wrb); ++ ++ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; ++ be_wrb_hdr_prepare(wrb, payload_len, false, 1, ++ OPCODE_COMMON_MANAGE_FAT); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_MANAGE_FAT, payload_len); ++ ++ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma)); ++ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF); ++ sge->len = cpu_to_le32(get_fat_cmd.size); ++ ++ req->fat_operation = cpu_to_le32(RETRIEVE_FAT); ++ req->read_log_offset = cpu_to_le32(log_offset); ++ req->read_log_length = cpu_to_le32(buf_size); ++ req->data_buffer_size = cpu_to_le32(buf_size); ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; ++ memcpy(buf + offset, ++ resp->data_buffer, ++ le32_to_cpu(resp->read_log_length)); ++ } else { ++ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); ++ goto err; ++ } ++ offset += buf_size; ++ log_offset += buf_size; ++ } ++err: ++ pci_free_consistent(adapter->pdev, get_fat_cmd.size, ++ get_fat_cmd.va, ++ get_fat_cmd.dma); ++ spin_unlock_bh(&adapter->mcc_lock); ++} ++ ++/* Uses synchronous mcc */ ++int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, ++ char *fw_on_flash) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_fw_version *req; + int status; + +- spin_lock(&adapter->mbox_lock); ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + +- wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ 
OPCODE_COMMON_GET_FW_VERSION); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_FW_VERSION, sizeof(*req)); + +- status = be_mbox_notify_wait(adapter); ++ status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); +- strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); ++ strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN-1); ++ if (fw_on_flash) ++ strncpy(fw_on_flash, resp->fw_on_flash_version_string, ++ FW_VER_LEN-1); + } +- +- spin_unlock(&adapter->mbox_lock); ++err: ++ spin_unlock_bh(&adapter->mcc_lock); + return status; + } + +@@ -897,13 +1464,19 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_modify_eq_delay *req; ++ int status = 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_MODIFY_EQ_DELAY); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); +@@ -915,8 +1488,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) + + be_mcc_notify(adapter); + ++err: + spin_unlock_bh(&adapter->mcc_lock); +- return 0; ++ return status; + } + + /* Uses sycnhronous mcc */ +@@ -930,9 +1504,14 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_NTWK_VLAN_CONFIG); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req)); +@@ -948,79 +1527,63 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, + + status = be_mcc_notify_wait(adapter); + ++err: + spin_unlock_bh(&adapter->mcc_lock); + return status; + } + +-/* Uses MCC for this command as it may be called in BH context +- * Uses synchronous mcc +- */ +-int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) ++int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) + { + struct be_mcc_wrb *wrb; +- struct be_cmd_req_promiscuous_config *req; ++ struct be_dma_mem *mem = &adapter->rx_filter; ++ struct be_cmd_req_rx_filter *req = mem->va; ++ struct be_sge *sge; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); +- req = embedded_payload(wrb); +- +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); +- +- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, +- OPCODE_ETH_PROMISCUOUS, sizeof(*req)); +- +- if (port_num) +- req->port1_promiscuous = en; +- else +- req->port0_promiscuous = en; +- +- status = be_mcc_notify_wait(adapter); +- +- spin_unlock_bh(&adapter->mcc_lock); +- return status; +-} +- +-/* +- * Uses MCC for this command as it may be called in BH context +- * (mc == NULL) => multicast promiscous +- */ +-int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, +- struct dev_mc_list *mc_list, u32 mc_count) +-{ +-#define BE_MAX_MC 32 /* set mcast promisc if > 32 */ +- struct be_mcc_wrb *wrb; +- struct be_cmd_req_mcast_mac_config *req; +- +- spin_lock_bh(&adapter->mcc_lock); +- +- wrb = wrb_from_mccq(adapter); +- req = 
embedded_payload(wrb); +- +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); +- ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ sge = nonembedded_sgl(wrb); ++ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); ++ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); ++ sge->len = cpu_to_le32(mem->size); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, ++ OPCODE_COMMON_NTWK_RX_FILTER); ++ ++ memset(req, 0, sizeof(*req)); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, +- OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); ++ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req)); + +- req->interface_id = if_id; +- if (mc_list && mc_count <= BE_MAX_MC) { +- int i; +- struct dev_mc_list *mc; +- +- req->num_mac = cpu_to_le16(mc_count); +- +- for (mc = mc_list, i = 0; mc; mc = mc->next, i++) +- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); ++ req->if_id = cpu_to_le32(adapter->if_handle); ++ if (flags & IFF_PROMISC) { ++ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | ++ BE_IF_FLAGS_VLAN_PROMISCUOUS); ++ if (value == ON) ++ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | ++ BE_IF_FLAGS_VLAN_PROMISCUOUS); ++ } else if (flags & IFF_ALLMULTI) { ++ req->if_flags_mask = req->if_flags = ++ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); + } else { +- req->promiscuous = 1; +- } ++ struct netdev_hw_addr *ha; ++ int i = 0; + +- be_mcc_notify_wait(adapter); ++ req->if_flags_mask = req->if_flags = ++ cpu_to_le32(BE_IF_FLAGS_MULTICAST); ++ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev)); ++ netdev_for_each_mc_addr(ha, adapter->netdev) ++ memcpy(req->mcast_mac[i++].byte, ha->DMI_ADDR, ++ ETH_ALEN); ++ } ++ status = be_mcc_notify_wait(adapter); + ++err: + spin_unlock_bh(&adapter->mcc_lock); +- +- return 0; ++ return status; + } + + /* Uses synchrounous mcc */ +@@ -1033,9 +1596,14 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_SET_FLOW_CONTROL); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req)); +@@ -1045,6 +1613,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) + + status = be_mcc_notify_wait(adapter); + ++err: + spin_unlock_bh(&adapter->mcc_lock); + return status; + } +@@ -1059,9 +1628,14 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_GET_FLOW_CONTROL); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req)); +@@ -1074,23 +1648,27 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) + *rx_fc = le16_to_cpu(resp->rx_flow_control); + } + ++err: + spin_unlock_bh(&adapter->mcc_lock); + return status; + } + + /* Uses mbox */ +-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap) ++int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, ++ u32 *mode, u32 *function_caps) + { + struct be_mcc_wrb *wrb; + struct be_cmd_req_query_fw_cfg *req; + int status; + +- 
spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); +@@ -1099,10 +1677,11 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap) + if (!status) { + struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); + *port_num = le32_to_cpu(resp->phys_port); +- *cap = le32_to_cpu(resp->function_cap); ++ *mode = le32_to_cpu(resp->function_mode); ++ *function_caps = le32_to_cpu(resp->function_caps); + } + +- spin_unlock(&adapter->mbox_lock); ++ mutex_unlock(&adapter->mbox_lock); + return status; + } + +@@ -1113,19 +1692,161 @@ int be_cmd_reset_function(struct be_adapter *adapter) + struct be_cmd_req_hdr *req; + int status; + +- spin_lock(&adapter->mbox_lock); ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + +- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_FUNCTION_RESET); + + be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_FUNCTION_RESET, sizeof(*req)); + + status = be_mbox_notify_wait(adapter); + +- spin_unlock(&adapter->mbox_lock); ++ mutex_unlock(&adapter->mbox_lock); ++ return status; ++} ++ ++int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_rss_config *req; ++ u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF, ++ 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF}; ++ int status; ++ ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; ++ ++ wrb = wrb_from_mbox(adapter); ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_ETH_RSS_CONFIG); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, ++ OPCODE_ETH_RSS_CONFIG, sizeof(*req)); ++ ++ req->if_id = cpu_to_le32(adapter->if_handle); ++ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4); ++ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); ++ memcpy(req->cpu_table, rsstable, table_size); ++ memcpy(req->hash, myhash, sizeof(myhash)); ++ be_dws_cpu_to_le(req->hash, sizeof(req->hash)); ++ ++ status = be_mbox_notify_wait(adapter); ++ ++ mutex_unlock(&adapter->mbox_lock); ++ return status; ++} ++ ++/* Uses sync mcc */ ++int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, ++ u8 bcn, u8 sts, u8 state) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_enable_disable_beacon *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_ENABLE_DISABLE_BEACON); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req)); ++ ++ req->port_num = port_num; ++ req->beacon_state = state; ++ req->beacon_duration = bcn; ++ req->status_duration = sts; ++ ++ status = be_mcc_notify_wait(adapter); ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++/* Uses sync mcc */ ++int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) ++{ ++ struct be_mcc_wrb 
*wrb; ++ struct be_cmd_req_get_beacon_state *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_GET_BEACON_STATE); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req)); ++ ++ req->port_num = port_num; ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_get_beacon_state *resp = ++ embedded_payload(wrb); ++ *state = resp->beacon_state; ++ } ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++/* Uses sync mcc */ ++int be_cmd_read_port_type(struct be_adapter *adapter, u32 port, ++ u8 *connector) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_port_type *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0, ++ OPCODE_COMMON_READ_TRANSRECV_DATA); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req)); ++ ++ req->port = cpu_to_le32(port); ++ req->page_num = cpu_to_le32(TR_PAGE_A0); ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_port_type *resp = embedded_payload(wrb); ++ *connector = resp->data.connector; ++ } ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); + return status; + } + +@@ -1133,16 +1854,24 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 flash_type, u32 flash_opcode, u32 buf_size) + { + struct be_mcc_wrb *wrb; +- struct be_cmd_write_flashrom *req = cmd->va; ++ struct be_cmd_write_flashrom *req; + struct be_sge *sge; + int status; + + spin_lock_bh(&adapter->mcc_lock); ++ adapter->flash_status = 0; + + wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err_unlock; ++ } ++ req = cmd->va; + sge = nonembedded_sgl(wrb); + +- be_wrb_hdr_prepare(wrb, cmd->size, false, 1); ++ be_wrb_hdr_prepare(wrb, cmd->size, false, 1, ++ OPCODE_COMMON_WRITE_FLASHROM); ++ wrb->tag1 = CMD_SUBSYSTEM_COMMON; + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_WRITE_FLASHROM, cmd->size); +@@ -1154,8 +1883,852 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, + req->params.op_code = cpu_to_le32(flash_opcode); + req->params.data_buf_size = cpu_to_le32(buf_size); + ++ be_mcc_notify(adapter); ++ spin_unlock_bh(&adapter->mcc_lock); ++ ++ if (!wait_for_completion_timeout(&adapter->flash_compl, ++ msecs_to_jiffies(40000))) ++ status = -1; ++ else ++ status = adapter->flash_status; ++ ++ return status; ++ ++err_unlock: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, ++ int offset) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_write_flashrom *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0, ++ OPCODE_COMMON_READ_FLASHROM); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4); ++ ++ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); ++ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 
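++	/* FLASHROM_OPER_REPORT is a read operation: the four bytes at
++	 * 'offset' (the CRC stored in flash) come back in
++	 * req->params.data_buf and are copied to 'flashed_crc' below. */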
++ req->params.offset = cpu_to_le32(offset); ++ req->params.data_buf_size = cpu_to_le32(0x4); ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) ++ memcpy(flashed_crc, req->params.data_buf, 4); ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, ++ struct be_dma_mem *nonemb_cmd) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_acpi_wol_magic_config *req; ++ struct be_sge *sge; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = nonemb_cmd->va; ++ sge = nonembedded_sgl(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, ++ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, ++ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req)); ++ memcpy(req->magic_mac, mac, ETH_ALEN); ++ ++ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); ++ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); ++ sge->len = cpu_to_le32(nonemb_cmd->size); ++ ++ status = be_mcc_notify_wait(adapter); ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, ++ u8 loopback_type, u8 enable) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_set_lmode *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, ++ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, ++ sizeof(*req)); ++ ++ req->src_port = port_num; ++ req->dest_port = port_num; ++ req->loopback_type = loopback_type; ++ req->loopback_state = enable; ++ ++ status = be_mcc_notify_wait(adapter); ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, ++ u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_loopback_test *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_LOWLEVEL_LOOPBACK_TEST); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, ++ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); ++ req->hdr.timeout = cpu_to_le32(4); ++ ++ req->pattern = cpu_to_le64(pattern); ++ req->src_port = cpu_to_le32(port_num); ++ req->dest_port = cpu_to_le32(port_num); ++ req->pkt_size = cpu_to_le32(pkt_size); ++ req->num_pkts = cpu_to_le32(num_pkts); ++ req->loopback_type = cpu_to_le32(loopback_type); ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); ++ status = le32_to_cpu(resp->status); ++ } ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, ++ u32 byte_cnt, struct be_dma_mem *cmd) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_ddrdma_test *req; ++ struct be_sge *sge; ++ int status; ++ int i, j = 0; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = cmd->va; ++ sge = nonembedded_sgl(wrb); ++ 
be_wrb_hdr_prepare(wrb, cmd->size, false, 1, ++ OPCODE_LOWLEVEL_HOST_DDR_DMA); ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, ++ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size); ++ ++ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma)); ++ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF); ++ sge->len = cpu_to_le32(cmd->size); ++ ++ req->pattern = cpu_to_le64(pattern); ++ req->byte_count = cpu_to_le32(byte_cnt); ++ for (i = 0; i < byte_cnt; i++) { ++ req->snd_buff[i] = (u8)(pattern >> (j*8)); ++ j++; ++ if (j > 7) ++ j = 0; ++ } ++ ++ status = be_mcc_notify_wait(adapter); ++ ++ if (!status) { ++ struct be_cmd_resp_ddrdma_test *resp; ++ resp = cmd->va; ++ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || ++ resp->snd_err) { ++ status = -1; ++ } ++ } ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_get_seeprom_data(struct be_adapter *adapter, ++ struct be_dma_mem *nonemb_cmd) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_seeprom_read *req; ++ struct be_sge *sge; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ req = nonemb_cmd->va; ++ sge = nonembedded_sgl(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, ++ OPCODE_COMMON_SEEPROM_READ); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_SEEPROM_READ, sizeof(*req)); ++ ++ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); ++ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); ++ sge->len = cpu_to_le32(nonemb_cmd->size); ++ ++ status = be_mcc_notify_wait(adapter); ++ ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_get_phy_info(struct be_adapter *adapter, ++ struct be_phy_info *phy_info) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_get_phy_info *req; ++ struct be_sge *sge; ++ struct be_dma_mem cmd; ++ struct be_phy_info *resp_phy_info; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ cmd.size = sizeof(struct be_cmd_req_get_phy_info); ++ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, ++ &cmd.dma); ++ if (!cmd.va) { ++ dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); ++ status = -ENOMEM; ++ goto err; ++ } ++ ++ req = cmd.va; ++ sge = nonembedded_sgl(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, ++ OPCODE_COMMON_GET_PHY_DETAILS); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_GET_PHY_DETAILS, ++ sizeof(*req)); ++ ++ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma)); ++ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF); ++ sge->len = cpu_to_le32(cmd.size); ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr); ++ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type); ++ phy_info->interface_type = ++ le16_to_cpu(resp_phy_info->interface_type); ++ phy_info->auto_speeds_supported = ++ le16_to_cpu(resp_phy_info->auto_speeds_supported); ++ phy_info->fixed_speeds_supported = ++ le16_to_cpu(resp_phy_info->fixed_speeds_supported); ++ phy_info->misc_params = ++ le32_to_cpu(resp_phy_info->misc_params); ++ } ++ pci_free_consistent(adapter->pdev, cmd.size, ++ cmd.va, cmd.dma); ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_set_qos *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); 
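++	/* A NULL wrb means the MCC queue has no free work request entry;
++	 * fail with -EBUSY below rather than waiting for one. */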
++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_SET_QOS); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_SET_QOS, sizeof(*req)); ++ ++ req->hdr.domain = domain; ++ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); ++ req->max_bps_nic = cpu_to_le32(bps); ++ ++ status = be_mcc_notify_wait(adapter); ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++int be_cmd_get_cntl_attributes(struct be_adapter *adapter) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_cntl_attribs *req; ++ struct be_cmd_resp_cntl_attribs *resp; ++ struct be_sge *sge; ++ int status; ++ int payload_len = max(sizeof(*req), sizeof(*resp)); ++ struct mgmt_controller_attrib *attribs; ++ struct be_dma_mem attribs_cmd; ++ ++ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); ++ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); ++ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, ++ &attribs_cmd.dma); ++ if (!attribs_cmd.va) { ++ dev_err(&adapter->pdev->dev, ++ "Memory allocation failure\n"); ++ return -ENOMEM; ++ } ++ ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; ++ ++ wrb = wrb_from_mbox(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ req = attribs_cmd.va; ++ sge = nonembedded_sgl(wrb); ++ ++ be_wrb_hdr_prepare(wrb, payload_len, false, 1, ++ OPCODE_COMMON_GET_CNTL_ATTRIBUTES); ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len); ++ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma)); ++ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF); ++ sge->len = cpu_to_le32(attribs_cmd.size); ++ ++ status = be_mbox_notify_wait(adapter); ++ if (!status) { ++ attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va + ++ sizeof(struct be_cmd_resp_hdr)); ++ adapter->hba_port_num = attribs->hba_attribs.phy_port; ++ strncpy(adapter->model_number, ++ attribs->hba_attribs.controller_model_number, 31); ++ } ++ ++err: ++ mutex_unlock(&adapter->mbox_lock); ++ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, ++ attribs_cmd.dma); ++ return status; ++} ++ ++/* Uses mbox */ ++int be_cmd_req_native_mode(struct be_adapter *adapter) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_set_func_cap *req; ++ int status; ++ ++ if (mutex_lock_interruptible(&adapter->mbox_lock)) ++ return -1; ++ ++ wrb = wrb_from_mbox(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req)); ++ ++ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | ++ CAPABILITY_BE3_NATIVE_ERX_API); ++ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); ++ ++ status = be_mbox_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); ++ adapter->be3_native = le32_to_cpu(resp->cap_flags) & ++ CAPABILITY_BE3_NATIVE_ERX_API; ++ } ++err: ++ mutex_unlock(&adapter->mbox_lock); ++ return status; ++} ++ ++static void encode_port_names(struct be_adapter *adapter) ++{ ++ switch (adapter->port_name[adapter->hba_port_num]) { ++ case '0': ++ adapter->port_name[adapter->hba_port_num] = 0; ++ break; ++ case '1': ++ adapter->port_name[adapter->hba_port_num] = 1; ++ break; ++ case '2': ++ 
adapter->port_name[adapter->hba_port_num] = 2;
++		break;
++	case '3':
++		adapter->port_name[adapter->hba_port_num] = 3;
++		break;
++	case '4':
++		adapter->port_name[adapter->hba_port_num] = 4;
++		break;
++	case 'A':
++		adapter->port_name[adapter->hba_port_num] = 5;
++		break;
++	case 'B':
++		adapter->port_name[adapter->hba_port_num] = 6;
++		break;
++	case 'C':
++		adapter->port_name[adapter->hba_port_num] = 7;
++		break;
++	case 'D':
++		adapter->port_name[adapter->hba_port_num] = 8;
++		break;
++	}
++}
++
++int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name)
++{
++	struct be_mcc_wrb *wrb;
++	struct be_cmd_req_get_port_name *req;
++	int status;
++
++	spin_lock_bh(&adapter->mcc_lock);
++
++	wrb = wrb_from_mccq(adapter);
++	if (!wrb) {
++		status = -EBUSY;
++		goto err;
++	}
++
++	req = embedded_payload(wrb);
++
++	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++			OPCODE_COMMON_GET_PORT_NAME);
++	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++			OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
++
++	status = be_mcc_notify_wait(adapter);
++	if (!status) {
++		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
++		port_name[0] = resp->port0_name;
++		port_name[1] = resp->port1_name;
++	}
++
++err:
++	spin_unlock_bh(&adapter->mcc_lock);
++
++	if (!status)
++		encode_port_names(adapter);
++	return status;
++}
++
++int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name)
++{
++	struct be_mcc_wrb *wrb;
++	struct be_cmd_req_get_port_name *req;
++	int status;
++
++	spin_lock_bh(&adapter->mcc_lock);
++
++	wrb = wrb_from_mccq(adapter);
++	if (!wrb) {
++		status = -EBUSY;
++		goto err;
++	}
++	req = embedded_payload(wrb);
++
++	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++			OPCODE_COMMON_GET_PORT_NAME);
++	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++			OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
++	req->hdr.version = 1;
++
+ 	status = be_mcc_notify_wait(adapter);
++	if (!status) {
++		struct be_cmd_resp_get_port_name_v1 *resp = embedded_payload(wrb);
++		port_name[0] = resp->port0_name;
++		port_name[1] = resp->port1_name;
++		port_name[2] = resp->port2_name;
++		port_name[3] = resp->port3_name;
++	}
++
++err:
++	spin_unlock_bh(&adapter->mcc_lock);
++
++	if (!status)
++		encode_port_names(adapter);
++	return status;
++}
++
++int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs)
++{
++	struct be_mcc_wrb *wrb;
++	struct be_cmd_req_pg *req;
++	int status, num = 0;
++	bool query = true;
++
++	*fw_num_txqs = MAX_TX_QS;
++
++	if (mutex_lock_interruptible(&adapter->mbox_lock))
++		return -1;
++
++enable_pfc:
++	wrb = wrb_from_mbox(adapter);
++	req = embedded_payload(wrb);
++
++	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++			OPCODE_ETH_PG_FEATURE_QUERY_REQUEST);
++
++	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
++			OPCODE_ETH_PG_FEATURE_QUERY_REQUEST, sizeof(*req));
++
++	if (query)
++		req->query |= cpu_to_le32(REQ_PG_QUERY);
++	req->pfc_pg |= cpu_to_le32(REQ_PG_FEAT);
++
++	status = be_mbox_notify_wait(adapter);
++	if (!status) {
++		struct be_cmd_resp_pg *resp = embedded_payload(wrb);
++		if (query) {
++			if (le32_to_cpu(resp->pfc_pg) & REQ_PG_FEAT) {
++				num = le32_to_cpu(resp->num_tx_rings);
++				query = false;
++				goto enable_pfc;
++			}
++		} else {
++			adapter->flags |= BE_FLAGS_DCBX;
++			*fw_num_txqs = num;
++		}
++	}
++
++	mutex_unlock(&adapter->mbox_lock);
++	return status;
++}
++
++/* Set privilege(s) for a function */
++int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 mask, u32 *prev,
++		u32 domain)
++{
++	struct be_mcc_wrb *wrb;
++
struct be_cmd_req_set_fn_privileges *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_SET_FN_PRIVILEGES); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req)); ++ ++ req->hdr.domain = domain; ++ req->privilege_mask = cpu_to_le32(mask); ++ ++ status = be_mcc_notify_wait(adapter); ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++/* Get privilege(s) for a function */ ++int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, ++ u32 domain) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_get_fn_privileges *req; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_GET_FN_PRIVILEGES); + ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req)); ++ ++ req->hdr.domain = domain; ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_get_fn_privileges *resp = ++ embedded_payload(wrb); ++ *privilege = le32_to_cpu(resp->privilege_mask); ++ } else ++ *privilege = 0; ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++/* Set Hyper switch config */ ++int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, ++ u32 domain, u16 intf_id) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_set_hsw_config *req; ++ void *ctxt; ++ int status; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ctxt = &req->context; ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_SET_HSW_CONFIG); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req)); ++ ++ req->hdr.domain = domain; ++ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); ++ if (pvid) { ++ AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); ++ AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); ++ } ++ ++ be_dws_cpu_to_le(req->context, sizeof(req->context)); ++ status = be_mcc_notify_wait(adapter); ++ ++err: ++ spin_unlock_bh(&adapter->mcc_lock); ++ return status; ++} ++ ++/* Get Hyper switch config */ ++int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, ++ u32 domain, u16 intf_id) ++{ ++ struct be_mcc_wrb *wrb; ++ struct be_cmd_req_get_hsw_config *req; ++ void *ctxt; ++ int status; ++ u16 vid; ++ ++ spin_lock_bh(&adapter->mcc_lock); ++ ++ wrb = wrb_from_mccq(adapter); ++ if (!wrb) { ++ status = -EBUSY; ++ goto err; ++ } ++ ++ req = embedded_payload(wrb); ++ ctxt = &req->context; ++ ++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, ++ OPCODE_COMMON_GET_HSW_CONFIG); ++ ++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, ++ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req)); ++ ++ req->hdr.domain = domain; ++ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, ++ intf_id); ++ AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); ++ be_dws_cpu_to_le(req->context, sizeof(req->context)); ++ ++ status = be_mcc_notify_wait(adapter); ++ if (!status) { ++ struct be_cmd_resp_get_hsw_config *resp = ++ 
embedded_payload(wrb);
++		be_dws_le_to_cpu(&resp->context,
++				sizeof(resp->context));
++		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
++				pvid, &resp->context);
++		*pvid = le16_to_cpu(vid);
++	}
++
++err:
++	spin_unlock_bh(&adapter->mcc_lock);
++	return status;
++}
++
++int be_cmd_get_port_speed(struct be_adapter *adapter,
++		u8 port_num, u16 *dac_cable_len, u16 *port_speed)
++{
++	struct be_mcc_wrb *wrb;
++	struct be_cmd_req_get_port_speed *req;
++	int status = 0;
++
++	spin_lock_bh(&adapter->mcc_lock);
++
++	wrb = wrb_from_mccq(adapter);
++	if (!wrb) {
++		status = -EBUSY;
++		goto err;
++	}
++
++	req = embedded_payload(wrb);
++	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++			OPCODE_COMMON_NTWK_GET_LINK_SPEED);
++	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++			OPCODE_COMMON_NTWK_GET_LINK_SPEED,
++			sizeof(*req));
++	req->port_num = port_num;
++	status = be_mcc_notify_wait(adapter);
++	if (!status) {
++		struct be_cmd_resp_get_port_speed *resp =
++				embedded_payload(wrb);
++		*dac_cable_len = resp->dac_cable_length;
++		*port_speed = resp->mac_speed;
++	}
++
++err:
++	spin_unlock_bh(&adapter->mcc_lock);
++	return status;
++}
++
++int be_cmd_set_port_speed_v1(struct be_adapter *adapter,
++		u8 port_num, u16 mac_speed,
++		u16 dac_cable_len)
++{
++	struct be_mcc_wrb *wrb;
++	struct be_cmd_req_set_port_speed_v1 *req;
++	int status = 0;
++
++	spin_lock_bh(&adapter->mcc_lock);
++
++	wrb = wrb_from_mccq(adapter);
++	if (!wrb) {
++		status = -EBUSY;
++		goto err;
++	}
++	req = embedded_payload(wrb);
++	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++			OPCODE_COMMON_NTWK_SET_LINK_SPEED);
++
++	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++			OPCODE_COMMON_NTWK_SET_LINK_SPEED,
++			sizeof(*req));
++	req->hdr.version = 1;
++
++	req->port_num = port_num;
++	req->virt_port = port_num;
++	req->mac_speed = mac_speed;
++	req->dac_cable_length = dac_cable_len;
++	status = be_mcc_notify_wait(adapter);
++err:
++	spin_unlock_bh(&adapter->mcc_lock);
++	return status;
++}
++
++
++/* Uses sync mcc */
++#ifdef CONFIG_PALAU
++int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
++		int req_size, void *va)
++{
++	struct be_mcc_wrb *wrb;
++	struct be_sge *sge;
++	int status;
++	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) va;
++
++	spin_lock_bh(&adapter->mcc_lock);
++
++	wrb = wrb_from_mccq(adapter);
++	if (!wrb) {
++		status = -EBUSY;
++		goto err;
++	}
++	sge = nonembedded_sgl(wrb);
++
++	be_wrb_hdr_prepare(wrb, req_size, false, 1, hdr->opcode);
++	wrb->tag1 = MCC_WRB_PASS_THRU;
++	sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
++	sge->pa_lo = cpu_to_le32(dma & 0xFFFFFFFF);
++	sge->len = cpu_to_le32(req_size);
++
++	status = be_mcc_notify_wait(adapter);
++err:
+ 	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
++#endif
+diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
+index ad33d55..35aa5c7 100644
+--- a/drivers/net/benet/be_cmds.h
++++ b/drivers/net/benet/be_cmds.h
+@@ -1,20 +1,23 @@
+ /*
+- * Copyright (C) 2005 - 2009 ServerEngines
++ * Copyright (C) 2005 - 2011 Emulex
+  * All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License version 2
+- * as published by the Free Software Foundation. The full GNU General
++ * as published by the Free Software Foundation. The full GNU General
+  * Public License is included in this distribution in the file called COPYING.
+ * + * Contact Information: +- * linux-drivers@serverengines.com ++ * linux-drivers@emulex.com + * +- * ServerEngines +- * 209 N. Fair Oaks Ave +- * Sunnyvale, CA 94085 ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 + */ + ++#ifndef BE_CMDS_H ++#define BE_CMDS_H ++ + /* + * The driver sends configuration and managements command requests to the + * firmware in the BE. These requests are communicated to the processor +@@ -29,9 +32,10 @@ struct be_sge { + u32 len; + }; + +-#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/ ++#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/ + #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */ + #define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */ ++#define MCC_WRB_PASS_THRU 0xFF /* this wrb is used for pass thru cmd */ + struct be_mcc_wrb { + u32 embedded; /* dword 0 */ + u32 payload_length; /* dword 1 */ +@@ -44,24 +48,19 @@ struct be_mcc_wrb { + } payload; + }; + +-#define CQE_FLAGS_VALID_MASK (1 << 31) +-#define CQE_FLAGS_ASYNC_MASK (1 << 30) +-#define CQE_FLAGS_COMPLETED_MASK (1 << 28) +-#define CQE_FLAGS_CONSUMED_MASK (1 << 27) ++#define CQE_FLAGS_VALID_MASK (1 << 31) ++#define CQE_FLAGS_ASYNC_MASK (1 << 30) ++#define CQE_FLAGS_COMPLETED_MASK (1 << 28) ++#define CQE_FLAGS_CONSUMED_MASK (1 << 27) + + /* Completion Status */ + enum { +- MCC_STATUS_SUCCESS = 0x0, +-/* The client does not have sufficient privileges to execute the command */ +- MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1, +-/* A parameter in the command was invalid. */ +- MCC_STATUS_INVALID_PARAMETER = 0x2, +-/* There are insufficient chip resources to execute the command */ +- MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3, +-/* The command is completing because the queue was getting flushed */ +- MCC_STATUS_QUEUE_FLUSHING = 0x4, +-/* The command is completing with a DMA error */ +- MCC_STATUS_DMA_FAILED = 0x5, ++ MCC_STATUS_SUCCESS = 0, ++ MCC_STATUS_FAILED = 1, ++ MCC_STATUS_ILLEGAL_REQUEST = 2, ++ MCC_STATUS_ILLEGAL_FIELD = 3, ++ MCC_STATUS_INSUFFICIENT_BUFFER = 4, ++ MCC_STATUS_UNAUTHORIZED_REQUEST = 5, + MCC_STATUS_NOT_SUPPORTED = 66 + }; + +@@ -81,15 +80,24 @@ struct be_mcc_compl { + * mcc_compl is interpreted as follows: + */ + #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ ++#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */ + #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF ++#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF + #define ASYNC_EVENT_CODE_LINK_STATE 0x1 ++#define ASYNC_EVENT_CODE_GRP_5 0x5 ++#define ASYNC_EVENT_QOS_SPEED 0x1 ++#define ASYNC_EVENT_COS_PRIORITY 0x2 ++#define ASYNC_EVENT_PVID_STATE 0x3 ++#define GRP5_TYPE_PRIO_TC_MAP 4 ++ + struct be_async_event_trailer { + u32 code; + }; + + enum { +- ASYNC_EVENT_LINK_DOWN = 0x0, +- ASYNC_EVENT_LINK_UP = 0x1 ++ ASYNC_EVENT_LINK_DOWN = 0x0, ++ ASYNC_EVENT_LINK_UP = 0x1, ++ ASYNC_EVENT_LOGICAL = 0x2 + }; + + /* When the event code of an async trailer is link-state, the mcc_compl +@@ -101,7 +109,51 @@ struct be_async_event_link_state { + u8 port_duplex; + u8 port_speed; + u8 port_fault; +- u8 rsvd0[7]; ++ u8 rsvd0; ++ u16 qos_link_speed; ++ u32 event_tag; ++ struct be_async_event_trailer trailer; ++} __packed; ++ ++/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED ++ * the mcc_compl must be interpreted as follows ++ */ ++struct be_async_event_grp5_qos_link_speed { ++ u8 physical_port; ++ u8 rsvd[5]; ++ u16 qos_link_speed; ++ u32 event_tag; ++ struct be_async_event_trailer trailer; ++} __packed; ++ ++/* When the event code of an async trailer is GRP5 and 
event type is ++ * CoS-Priority, the mcc_compl must be interpreted as follows ++ */ ++struct be_async_event_grp5_cos_priority { ++ u8 physical_port; ++ u8 available_priority_bmap; ++ u8 reco_default_priority; ++ u8 valid; ++ u8 rsvd0; ++ u8 event_tag; ++ struct be_async_event_trailer trailer; ++} __packed; ++ ++/* When the event code of an async trailer is GRP5 and event type is ++ * PVID state, the mcc_compl must be interpreted as follows ++ */ ++struct be_async_event_grp5_pvid_state { ++ u8 enabled; ++ u8 rsvd0; ++ u16 tag; ++ u32 event_tag; ++ u32 rsvd1; ++ struct be_async_event_trailer trailer; ++} __packed; ++ ++/* GRP5 prio-tc-map event */ ++struct be_async_event_grp5_prio_tc_map { ++ u8 prio_tc_map[8]; /* map[prio] -> tc_id */ + struct be_async_event_trailer trailer; + } __packed; + +@@ -111,41 +163,68 @@ struct be_mcc_mailbox { + }; + + #define CMD_SUBSYSTEM_COMMON 0x1 +-#define CMD_SUBSYSTEM_ETH 0x3 ++#define CMD_SUBSYSTEM_ETH 0x3 ++#define CMD_SUBSYSTEM_LOWLEVEL 0xb + + #define OPCODE_COMMON_NTWK_MAC_QUERY 1 + #define OPCODE_COMMON_NTWK_MAC_SET 2 + #define OPCODE_COMMON_NTWK_MULTICAST_SET 3 +-#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4 ++#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4 + #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5 ++#define OPCODE_COMMON_READ_FLASHROM 6 + #define OPCODE_COMMON_WRITE_FLASHROM 7 + #define OPCODE_COMMON_CQ_CREATE 12 + #define OPCODE_COMMON_EQ_CREATE 13 +-#define OPCODE_COMMON_MCC_CREATE 21 +-#define OPCODE_COMMON_NTWK_RX_FILTER 34 ++#define OPCODE_COMMON_MCC_CREATE 21 ++#define OPCODE_COMMON_SET_QOS 28 ++#define OPCODE_COMMON_MCC_CREATE_EXT 90 ++#define OPCODE_COMMON_SEEPROM_READ 30 ++#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 ++#define OPCODE_COMMON_NTWK_RX_FILTER 34 + #define OPCODE_COMMON_GET_FW_VERSION 35 + #define OPCODE_COMMON_SET_FLOW_CONTROL 36 + #define OPCODE_COMMON_GET_FLOW_CONTROL 37 + #define OPCODE_COMMON_SET_FRAME_SIZE 39 + #define OPCODE_COMMON_MODIFY_EQ_DELAY 41 + #define OPCODE_COMMON_FIRMWARE_CONFIG 42 +-#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 +-#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 +-#define OPCODE_COMMON_MCC_DESTROY 53 +-#define OPCODE_COMMON_CQ_DESTROY 54 +-#define OPCODE_COMMON_EQ_DESTROY 55 ++#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 ++#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 ++#define OPCODE_COMMON_MCC_DESTROY 53 ++#define OPCODE_COMMON_CQ_DESTROY 54 ++#define OPCODE_COMMON_EQ_DESTROY 55 ++#define OPCODE_COMMON_NTWK_SET_LINK_SPEED 57 + #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 + #define OPCODE_COMMON_NTWK_PMAC_ADD 59 + #define OPCODE_COMMON_NTWK_PMAC_DEL 60 + #define OPCODE_COMMON_FUNCTION_RESET 61 ++#define OPCODE_COMMON_MANAGE_FAT 68 ++#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 ++#define OPCODE_COMMON_GET_BEACON_STATE 70 ++#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 ++#define OPCODE_COMMON_GET_PORT_NAME 77 ++#define OPCODE_COMMON_SET_FN_PRIVILEGES 100 ++#define OPCODE_COMMON_GET_PHY_DETAILS 102 ++#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 ++#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 ++#define OPCODE_COMMON_NTWK_GET_LINK_SPEED 134 ++#define OPCODE_COMMON_GET_HSW_CONFIG 152 ++#define OPCODE_COMMON_SET_HSW_CONFIG 153 ++#define OPCODE_COMMON_GET_FN_PRIVILEGES 170 + ++#define OPCODE_ETH_RSS_CONFIG 1 + #define OPCODE_ETH_ACPI_CONFIG 2 + #define OPCODE_ETH_PROMISCUOUS 3 + #define OPCODE_ETH_GET_STATISTICS 4 + #define OPCODE_ETH_TX_CREATE 7 +-#define OPCODE_ETH_RX_CREATE 8 +-#define OPCODE_ETH_TX_DESTROY 9 +-#define OPCODE_ETH_RX_DESTROY 10 ++#define OPCODE_ETH_RX_CREATE 8 
++#define OPCODE_ETH_TX_DESTROY 9 ++#define OPCODE_ETH_RX_DESTROY 10 ++#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12 ++#define OPCODE_ETH_PG_FEATURE_QUERY_REQUEST 23 ++ ++#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 ++#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 ++#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19 + + struct be_cmd_req_hdr { + u8 opcode; /* dword 0 */ +@@ -159,7 +238,7 @@ struct be_cmd_req_hdr { + }; + + #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ +-#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */ ++#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */ + struct be_cmd_resp_hdr { + u32 info; /* dword 0 */ + u32 status; /* dword 1 */ +@@ -265,7 +344,7 @@ struct be_cmd_req_pmac_del { + /******************** Create CQ ***************************/ + /* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +-struct amap_cq_context { ++struct amap_cq_context_be { + u8 cidx[11]; /* dword 0*/ + u8 rsvd0; /* dword 0*/ + u8 coalescwm[2]; /* dword 0*/ +@@ -288,11 +367,28 @@ struct amap_cq_context { + u8 rsvd5[32]; /* dword 3*/ + } __packed; + ++struct amap_cq_context_lancer { ++ u8 rsvd0[12]; /* dword 0*/ ++ u8 coalescwm[2]; /* dword 0*/ ++ u8 nodelay; /* dword 0*/ ++ u8 rsvd1[12]; /* dword 0*/ ++ u8 count[2]; /* dword 0*/ ++ u8 valid; /* dword 0*/ ++ u8 rsvd2; /* dword 0*/ ++ u8 eventable; /* dword 0*/ ++ u8 eqid[16]; /* dword 1*/ ++ u8 rsvd3[15]; /* dword 1*/ ++ u8 armed; /* dword 1*/ ++ u8 rsvd4[32]; /* dword 2*/ ++ u8 rsvd5[32]; /* dword 3*/ ++} __packed; ++ + struct be_cmd_req_cq_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; +- u16 rsvd0; +- u8 context[sizeof(struct amap_cq_context) / 8]; ++ u8 page_size; ++ u8 rsvd0; ++ u8 context[sizeof(struct amap_cq_context_be) / 8]; + struct phys_addr pages[8]; + } __packed; + +@@ -302,10 +398,28 @@ struct be_cmd_resp_cq_create { + u16 rsvd0; + } __packed; + ++struct be_cmd_req_get_fat { ++ struct be_cmd_req_hdr hdr; ++ u32 fat_operation; ++ u32 read_log_offset; ++ u32 read_log_length; ++ u32 data_buffer_size; ++ u32 data_buffer[1]; ++} __packed; ++ ++struct be_cmd_resp_get_fat { ++ struct be_cmd_resp_hdr hdr; ++ u32 log_size; ++ u32 read_log_length; ++ u32 rsvd[2]; ++ u32 data_buffer[1]; ++} __packed; ++ ++ + /******************** Create MCCQ ***************************/ + /* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +-struct amap_mcc_context { ++struct amap_mcc_context_be { + u8 con_index[14]; + u8 rsvd0[2]; + u8 ring_size[4]; +@@ -320,11 +434,31 @@ struct amap_mcc_context { + u8 rsvd2[32]; + } __packed; + ++struct amap_mcc_context_lancer { ++ u8 async_cq_id[16]; ++ u8 ring_size[4]; ++ u8 rsvd0[12]; ++ u8 rsvd1[31]; ++ u8 valid; ++ u8 async_cq_valid[1]; ++ u8 rsvd2[31]; ++ u8 rsvd3[32]; ++} __packed; ++ + struct be_cmd_req_mcc_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; +- u16 rsvd0; +- u8 context[sizeof(struct amap_mcc_context) / 8]; ++ u16 cq_id; ++ u8 context[sizeof(struct amap_mcc_context_be) / 8]; ++ struct phys_addr pages[8]; ++} __packed; ++ ++struct be_cmd_req_mcc_ext_create { ++ struct be_cmd_req_hdr hdr; ++ u16 num_pages; ++ u16 cq_id; ++ u32 async_event_bitmap[1]; ++ u8 context[sizeof(struct amap_mcc_context_be) / 8]; + struct phys_addr pages[8]; + } __packed; + +@@ -335,49 +469,32 @@ struct be_cmd_resp_mcc_create { + } __packed; + + /******************** Create TxQ ***************************/ +-#define 
BE_ETH_TX_RING_TYPE_STANDARD 2 ++#define ETX_QUEUE_TYPE_STANDARD 0x2 ++#define ETX_QUEUE_TYPE_PRIORITY 0x10 + #define BE_ULP1_NUM 1 + +-/* Pseudo amap definition in which each bit of the actual structure is defined +- * as a byte: used to calculate offset/shift/mask of each field */ +-struct amap_tx_context { +- u8 rsvd0[16]; /* dword 0 */ +- u8 tx_ring_size[4]; /* dword 0 */ +- u8 rsvd1[26]; /* dword 0 */ +- u8 pci_func_id[8]; /* dword 1 */ +- u8 rsvd2[9]; /* dword 1 */ +- u8 ctx_valid; /* dword 1 */ +- u8 cq_id_send[16]; /* dword 2 */ +- u8 rsvd3[16]; /* dword 2 */ +- u8 rsvd4[32]; /* dword 3 */ +- u8 rsvd5[32]; /* dword 4 */ +- u8 rsvd6[32]; /* dword 5 */ +- u8 rsvd7[32]; /* dword 6 */ +- u8 rsvd8[32]; /* dword 7 */ +- u8 rsvd9[32]; /* dword 8 */ +- u8 rsvd10[32]; /* dword 9 */ +- u8 rsvd11[32]; /* dword 10 */ +- u8 rsvd12[32]; /* dword 11 */ +- u8 rsvd13[32]; /* dword 12 */ +- u8 rsvd14[32]; /* dword 13 */ +- u8 rsvd15[32]; /* dword 14 */ +- u8 rsvd16[32]; /* dword 15 */ +-} __packed; +- + struct be_cmd_req_eth_tx_create { + struct be_cmd_req_hdr hdr; + u8 num_pages; + u8 ulp_num; +- u8 type; +- u8 bound_port; +- u8 context[sizeof(struct amap_tx_context) / 8]; ++ u16 type; ++ u16 if_id; ++ u8 queue_size; ++ u8 rsvd1; ++ u32 rsvd2; ++ u16 cq_id; ++ u16 rsvd3; ++ u32 rsvd4[13]; + struct phys_addr pages[8]; + } __packed; + + struct be_cmd_resp_eth_tx_create { + struct be_cmd_resp_hdr hdr; + u16 cid; +- u16 rsvd0; ++ u16 rid; ++ u32 db_offset; ++ u8 tc_id; ++ u8 rsvd0[3]; + } __packed; + + /******************** Create RxQ ***************************/ +@@ -396,7 +513,7 @@ struct be_cmd_req_eth_rx_create { + struct be_cmd_resp_eth_rx_create { + struct be_cmd_resp_hdr hdr; + u16 id; +- u8 cpu_id; ++ u8 rss_id; + u8 rsvd0; + } __packed; + +@@ -429,14 +546,15 @@ enum be_if_flags { + BE_IF_FLAGS_VLAN = 0x100, + BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, + BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, +- BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800 ++ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800, ++ BE_IF_FLAGS_MULTICAST = 0x1000 + }; + + /* An RX interface is an object with one or more MAC addresses and + * filtering capabilities. 
*/ + struct be_cmd_req_if_create { + struct be_cmd_req_hdr hdr; +- u32 version; /* ignore currntly */ ++ u32 version; /* ignore currently */ + u32 capability_flags; + u32 enable_flags; + u8 mac_addr[ETH_ALEN]; +@@ -458,7 +576,7 @@ struct be_cmd_req_if_destroy { + }; + + /*************** HW Stats Get **********************************/ +-struct be_port_rxf_stats { ++struct be_port_rxf_stats_v0 { + u32 rx_bytes_lsd; /* dword 0*/ + u32 rx_bytes_msd; /* dword 1*/ + u32 rx_total_frames; /* dword 2*/ +@@ -527,8 +645,8 @@ struct be_port_rxf_stats { + u32 rx_input_fifo_overflow; /* dword 65*/ + }; + +-struct be_rxf_stats { +- struct be_port_rxf_stats port[2]; ++struct be_rxf_stats_v0 { ++ struct be_port_rxf_stats_v0 port[2]; + u32 rx_drops_no_pbuf; /* dword 132*/ + u32 rx_drops_no_txpb; /* dword 133*/ + u32 rx_drops_no_erx_descr; /* dword 134*/ +@@ -545,31 +663,51 @@ struct be_rxf_stats { + u32 rx_drops_invalid_ring; /* dword 145*/ + u32 forwarded_packets; /* dword 146*/ + u32 rx_drops_mtu; /* dword 147*/ +- u32 rsvd0[15]; ++ u32 rsvd0[7]; ++ u32 port0_jabber_events; ++ u32 port1_jabber_events; ++ u32 rsvd1[6]; + }; + +-struct be_erx_stats { ++struct be_erx_stats_v0 { + u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/ +- u32 debug_wdma_sent_hold; /* dword 44*/ +- u32 debug_wdma_pbfree_sent_hold; /* dword 45*/ +- u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/ +- u32 debug_pmem_pbuf_dealloc; /* dword 47*/ ++ u32 rsvd[4]; + }; + +-struct be_hw_stats { +- struct be_rxf_stats rxf; ++struct be_pmem_stats { ++ u32 eth_red_drops; ++ u32 rsvd[5]; ++}; ++ ++struct be_hw_stats_v0 { ++ struct be_rxf_stats_v0 rxf; + u32 rsvd[48]; +- struct be_erx_stats erx; ++ struct be_erx_stats_v0 erx; ++ struct be_pmem_stats pmem; + }; + +-struct be_cmd_req_get_stats { ++struct be_cmd_req_get_stats_v0 { + struct be_cmd_req_hdr hdr; +- u8 rsvd[sizeof(struct be_hw_stats)]; ++ u8 rsvd[sizeof(struct be_hw_stats_v0)]; + }; + +-struct be_cmd_resp_get_stats { ++struct be_cmd_resp_get_stats_v0 { + struct be_cmd_resp_hdr hdr; +- struct be_hw_stats hw_stats; ++ struct be_hw_stats_v0 hw_stats; ++}; ++ ++struct be_cmd_req_get_cntl_addnl_attribs { ++ struct be_cmd_req_hdr hdr; ++ u8 rsvd[8]; ++}; ++ ++struct be_cmd_resp_get_cntl_addnl_attribs { ++ struct be_cmd_resp_hdr hdr; ++ u16 ipl_file_number; ++ u8 ipl_file_version; ++ u8 rsvd0; ++ u8 on_die_temperature; /* in degrees centigrade*/ ++ u8 rsvd1[3]; + }; + + struct be_cmd_req_vlan_config { +@@ -581,30 +719,22 @@ struct be_cmd_req_vlan_config { + u16 normal_vlan[64]; + } __packed; + +-struct be_cmd_req_promiscuous_config { +- struct be_cmd_req_hdr hdr; +- u8 port0_promiscuous; +- u8 port1_promiscuous; +- u16 rsvd0; +-} __packed; +- ++/******************** RX FILTER ******************************/ ++#define BE_MAX_MC 64 /* set mcast promisc if > 64 */ + struct macaddr { + u8 byte[ETH_ALEN]; + }; + +-struct be_cmd_req_mcast_mac_config { ++struct be_cmd_req_rx_filter { + struct be_cmd_req_hdr hdr; +- u16 num_mac; +- u8 promiscuous; +- u8 interface_id; +- struct macaddr mac[32]; +-} __packed; +- +-static inline struct be_hw_stats * +-hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd) +-{ +- return &cmd->hw_stats; +-} ++ u32 global_flags_mask; ++ u32 global_flags; ++ u32 if_flags_mask; ++ u32 if_flags; ++ u32 if_id; ++ u32 mcast_num; ++ struct macaddr mcast_mac[BE_MAX_MC]; ++}; + + /******************** Link Status Query *******************/ + struct be_cmd_req_link_status { +@@ -619,13 +749,18 @@ enum { + }; + + enum { +- PHY_LINK_SPEED_ZERO = 0x0, /* => No link */ ++ 
PHY_LINK_SPEED_ZERO = 0x0, /* => No link */ + PHY_LINK_SPEED_10MBPS = 0x1, + PHY_LINK_SPEED_100MBPS = 0x2, + PHY_LINK_SPEED_1GBPS = 0x3, + PHY_LINK_SPEED_10GBPS = 0x4 + }; + ++enum { ++ LINK_DOWN = 0x0, ++ LINK_UP = 0X1 ++}; ++ + struct be_cmd_resp_link_status { + struct be_cmd_resp_hdr hdr; + u8 physical_port; +@@ -634,9 +769,47 @@ struct be_cmd_resp_link_status { + u8 mac_fault; + u8 mgmt_mac_duplex; + u8 mgmt_mac_speed; +- u16 rsvd0; ++ u16 link_speed; ++ u32 logical_link_status; + } __packed; + ++/******************** Port Identification ***************************/ ++/* Identifies the type of port attached to NIC */ ++struct be_cmd_req_port_type { ++ struct be_cmd_req_hdr hdr; ++ u32 page_num; ++ u32 port; ++}; ++ ++enum { ++ TR_PAGE_A0 = 0xa0, ++ TR_PAGE_A2 = 0xa2 ++}; ++ ++struct be_cmd_resp_port_type { ++ struct be_cmd_resp_hdr hdr; ++ u32 page_num; ++ u32 port; ++ struct data { ++ u8 identifier; ++ u8 identifier_ext; ++ u8 connector; ++ u8 transceiver[8]; ++ u8 rsvd0[3]; ++ u8 length_km; ++ u8 length_hm; ++ u8 length_om1; ++ u8 length_om2; ++ u8 length_cu; ++ u8 length_cu_m; ++ u8 vendor_name[16]; ++ u8 rsvd; ++ u8 vendor_oui[3]; ++ u8 vendor_pn[16]; ++ u8 vendor_rev[4]; ++ } data; ++}; ++ + /******************** Get FW Version *******************/ + struct be_cmd_req_get_fw_version { + struct be_cmd_req_hdr hdr; +@@ -686,9 +859,13 @@ struct be_cmd_resp_modify_eq_delay { + } __packed; + + /******************** Get FW Config *******************/ ++#define FLEX10_MODE 0x400 ++#define VNIC_MODE 0x20000 ++#define UMC_ENABLED 0x1000000 ++ + struct be_cmd_req_query_fw_cfg { + struct be_cmd_req_hdr hdr; +- u32 rsvd[30]; ++ u32 rsvd[31]; + }; + + struct be_cmd_resp_query_fw_cfg { +@@ -696,10 +873,61 @@ struct be_cmd_resp_query_fw_cfg { + u32 be_config_number; + u32 asic_revision; + u32 phys_port; +- u32 function_cap; ++ u32 function_mode; + u32 rsvd[26]; ++ u32 function_caps; + }; + ++/******************** RSS Config *******************/ ++/* RSS types */ ++#define RSS_ENABLE_NONE 0x0 ++#define RSS_ENABLE_IPV4 0x1 ++#define RSS_ENABLE_TCP_IPV4 0x2 ++#define RSS_ENABLE_IPV6 0x4 ++#define RSS_ENABLE_TCP_IPV6 0x8 ++ ++struct be_cmd_req_rss_config { ++ struct be_cmd_req_hdr hdr; ++ u32 if_id; ++ u16 enable_rss; ++ u16 cpu_table_size_log2; ++ u32 hash[10]; ++ u8 cpu_table[128]; ++ u8 flush; ++ u8 rsvd0[3]; ++}; ++ ++/******************** Port Beacon ***************************/ ++ ++#define BEACON_STATE_ENABLED 0x1 ++#define BEACON_STATE_DISABLED 0x0 ++ ++struct be_cmd_req_enable_disable_beacon { ++ struct be_cmd_req_hdr hdr; ++ u8 port_num; ++ u8 beacon_state; ++ u8 beacon_duration; ++ u8 status_duration; ++} __packed; ++ ++struct be_cmd_resp_enable_disable_beacon { ++ struct be_cmd_resp_hdr resp_hdr; ++ u32 rsvd0; ++} __packed; ++ ++struct be_cmd_req_get_beacon_state { ++ struct be_cmd_req_hdr hdr; ++ u8 port_num; ++ u8 rsvd0; ++ u16 rsvd1; ++} __packed; ++ ++struct be_cmd_resp_get_beacon_state { ++ struct be_cmd_resp_hdr resp_hdr; ++ u8 beacon_state; ++ u8 rsvd0[3]; ++} __packed; ++ + /****************** Firmware Flash ******************/ + struct flashrom_params { + u32 op_code; +@@ -714,17 +942,468 @@ struct be_cmd_write_flashrom { + struct flashrom_params params; + }; + ++/************************ WOL *******************************/ ++struct be_cmd_req_acpi_wol_magic_config { ++ struct be_cmd_req_hdr hdr; ++ u32 rsvd0[145]; ++ u8 magic_mac[6]; ++ u8 rsvd2[2]; ++} __packed; ++ ++/********************** LoopBack test *********************/ ++struct be_cmd_req_loopback_test { ++ struct 
be_cmd_req_hdr hdr; ++ u32 loopback_type; ++ u32 num_pkts; ++ u64 pattern; ++ u32 src_port; ++ u32 dest_port; ++ u32 pkt_size; ++}; ++ ++struct be_cmd_resp_loopback_test { ++ struct be_cmd_resp_hdr resp_hdr; ++ u32 status; ++ u32 num_txfer; ++ u32 num_rx; ++ u32 miscomp_off; ++ u32 ticks_compl; ++}; ++ ++struct be_cmd_req_set_lmode { ++ struct be_cmd_req_hdr hdr; ++ u8 src_port; ++ u8 dest_port; ++ u8 loopback_type; ++ u8 loopback_state; ++}; ++ ++struct be_cmd_resp_set_lmode { ++ struct be_cmd_resp_hdr resp_hdr; ++ u8 rsvd0[4]; ++}; ++ ++/********************** DDR DMA test *********************/ ++struct be_cmd_req_ddrdma_test { ++ struct be_cmd_req_hdr hdr; ++ u64 pattern; ++ u32 byte_count; ++ u32 rsvd0; ++ u8 snd_buff[4096]; ++ u8 rsvd1[4096]; ++}; ++ ++struct be_cmd_resp_ddrdma_test { ++ struct be_cmd_resp_hdr hdr; ++ u64 pattern; ++ u32 byte_cnt; ++ u32 snd_err; ++ u8 rsvd0[4096]; ++ u8 rcv_buff[4096]; ++}; ++ ++/*********************** SEEPROM Read ***********************/ ++ ++#define BE_READ_SEEPROM_LEN 1024 ++struct be_cmd_req_seeprom_read { ++ struct be_cmd_req_hdr hdr; ++ u8 rsvd0[BE_READ_SEEPROM_LEN]; ++}; ++ ++struct be_cmd_resp_seeprom_read { ++ struct be_cmd_req_hdr hdr; ++ u8 seeprom_data[BE_READ_SEEPROM_LEN]; ++}; ++ ++enum { ++ PHY_TYPE_CX4_10GB = 0, ++ PHY_TYPE_XFP_10GB, ++ PHY_TYPE_SFP_1GB, ++ PHY_TYPE_SFP_PLUS_10GB, ++ PHY_TYPE_KR_10GB, ++ PHY_TYPE_KX4_10GB, ++ PHY_TYPE_BASET_10GB, ++ PHY_TYPE_BASET_1GB, ++ PHY_TYPE_BASEX_1GB, ++ PHY_TYPE_SGMII, ++ PHY_TYPE_DISABLED = 255 ++}; ++ ++#define BE_AN_EN 0x2 ++#define BE_PAUSE_SYM_EN 0x80 ++ ++struct be_cmd_req_get_phy_info { ++ struct be_cmd_req_hdr hdr; ++ u8 rsvd0[24]; ++}; ++ ++struct be_phy_info { ++ u16 phy_type; ++ u16 interface_type; ++ u32 misc_params; ++ u16 ext_phy_details; ++ u16 rsvd; ++ u16 auto_speeds_supported; ++ u16 fixed_speeds_supported; ++ u32 future_use[2]; ++}; ++ ++struct be_cmd_resp_get_phy_info { ++ struct be_cmd_req_hdr hdr; ++ struct be_phy_info phy_info; ++}; ++ ++/*********************** Set QOS ***********************/ ++ ++#define BE_QOS_BITS_NIC 1 ++ ++struct be_cmd_req_set_qos { ++ struct be_cmd_req_hdr hdr; ++ u32 valid_bits; ++ u32 max_bps_nic; ++ u32 rsvd[7]; ++}; ++ ++struct be_cmd_resp_set_qos { ++ struct be_cmd_resp_hdr hdr; ++ u32 rsvd; ++}; ++ ++/*********************** Controller Attributes ***********************/ ++struct be_cmd_req_cntl_attribs { ++ struct be_cmd_req_hdr hdr; ++}; ++ ++struct be_cmd_resp_cntl_attribs { ++ struct be_cmd_resp_hdr hdr; ++ struct mgmt_controller_attrib attribs; ++}; ++ ++/******************* get port names ***************/ ++struct be_cmd_req_get_port_name { ++ struct be_cmd_req_hdr hdr; ++ u32 rsvd0; ++}; ++ ++struct be_cmd_resp_get_port_name { ++ struct be_cmd_req_hdr hdr; ++ u8 port0_name; ++ u8 port1_name; ++ u8 rsvd0[2]; ++}; ++ ++struct be_cmd_resp_get_port_name_v1 { ++ struct be_cmd_req_hdr hdr; ++ u32 pt : 2; ++ u32 rsvd0 : 30; ++ u8 port0_name; ++ u8 port1_name; ++ u8 port2_name; ++ u8 port3_name; ++}; ++ ++/*********************** Set driver function ***********************/ ++#define CAPABILITY_SW_TIMESTAMPS 2 ++#define CAPABILITY_BE3_NATIVE_ERX_API 4 ++ ++struct be_cmd_req_set_func_cap { ++ struct be_cmd_req_hdr hdr; ++ u32 valid_cap_flags; ++ u32 cap_flags; ++ u8 rsvd[212]; ++}; ++ ++struct be_cmd_resp_set_func_cap { ++ struct be_cmd_resp_hdr hdr; ++ u32 valid_cap_flags; ++ u32 cap_flags; ++ u8 rsvd[212]; ++}; ++ ++/*********************** PG Query Request ****************************/ ++#define REQ_PG_QUERY 0x1 ++#define REQ_PG_FEAT 
0x1 ++struct be_cmd_req_pg { ++ struct be_cmd_req_hdr hdr; ++ u32 query; ++ u32 pfc_pg; ++}; ++ ++struct be_cmd_resp_pg { ++ struct be_cmd_resp_hdr hdr; ++ u32 pfc_pg; ++ u32 num_tx_rings; ++}; ++ ++/*********************** Function Privileges ***********************/ ++enum { ++ BE_PRIV_DEFAULT = 0x1, ++ BE_PRIV_LNKQUERY = 0x2, ++ BE_PRIV_LNKSTATS = 0x4, ++ BE_PRIV_LNKMGMT = 0x8, ++ BE_PRIV_LNKDIAG = 0x10, ++ BE_PRIV_UTILQUERY = 0x20, ++ BE_PRIV_FILTMGMT = 0x40, ++ BE_PRIV_IFACEMGMT = 0x80, ++ BE_PRIV_VHADM = 0x100, ++ BE_PRIV_DEVCFG = 0x200, ++ BE_PRIV_DEVSEC = 0x400 ++}; ++ ++struct be_cmd_req_get_fn_privileges { ++ struct be_cmd_req_hdr hdr; ++ u32 rsvd; ++}; ++ ++struct be_cmd_resp_get_fn_privileges { ++ struct be_cmd_resp_hdr hdr; ++ u32 privilege_mask; ++}; ++ ++struct be_cmd_req_set_fn_privileges { ++ struct be_cmd_req_hdr hdr; ++ u32 privilege_mask; ++}; ++ ++struct be_cmd_resp_set_fn_privileges { ++ struct be_cmd_resp_hdr hdr; ++ u32 prev_privilege_mask; ++}; ++ ++/*********************** HSW Config ***********************/ ++struct amap_set_hsw_context { ++ u8 interface_id[16]; ++ u8 rsvd0[14]; ++ u8 pvid_valid; ++ u8 rsvd1; ++ u8 rsvd2[16]; ++ u8 pvid[16]; ++ u8 rsvd3[32]; ++ u8 rsvd4[32]; ++ u8 rsvd5[32]; ++} __packed; ++ ++struct be_cmd_req_set_hsw_config { ++ struct be_cmd_req_hdr hdr; ++ u8 context[sizeof(struct amap_set_hsw_context) / 8]; ++} __packed; ++ ++struct be_cmd_resp_set_hsw_config { ++ struct be_cmd_resp_hdr hdr; ++ u32 rsvd; ++}; ++ ++struct amap_get_hsw_req_context { ++ u8 interface_id[16]; ++ u8 rsvd0[14]; ++ u8 pvid_valid; ++ u8 pport; ++} __packed; ++ ++struct amap_get_hsw_resp_context { ++ u8 rsvd1[16]; ++ u8 pvid[16]; ++ u8 rsvd2[32]; ++ u8 rsvd3[32]; ++ u8 rsvd4[32]; ++} __packed; ++ ++struct be_cmd_req_get_hsw_config { ++ struct be_cmd_req_hdr hdr; ++ u8 context[sizeof(struct amap_get_hsw_req_context) / 8]; ++} __packed; ++ ++struct be_cmd_resp_get_hsw_config { ++ struct be_cmd_resp_hdr hdr; ++ u8 context[sizeof(struct amap_get_hsw_resp_context) / 8]; ++ u32 rsvd; ++}; ++ ++/*************** Set speed ********************/ ++struct be_cmd_req_set_port_speed_v1 { ++ struct be_cmd_req_hdr hdr; ++ u8 port_num; ++ u8 virt_port; ++ u16 mac_speed; ++ u16 dac_cable_length; ++ u16 rsvd0; ++}; ++ ++struct be_cmd_resp_set_port_speed_v1 { ++ struct be_cmd_resp_hdr hdr; ++ u32 rsvd0; ++}; ++ ++/************** get port speed *******************/ ++struct be_cmd_req_get_port_speed { ++ struct be_cmd_req_hdr hdr; ++ u8 port_num; ++}; ++ ++struct be_cmd_resp_get_port_speed { ++ struct be_cmd_req_hdr hdr; ++ u16 mac_speed; ++ u16 dac_cable_length; ++}; ++ ++/*************** HW Stats Get v1 **********************************/ ++#define BE_TXP_SW_SZ 48 ++struct be_port_rxf_stats_v1 { ++ u32 rsvd0[12]; ++ u32 rx_crc_errors; ++ u32 rx_alignment_symbol_errors; ++ u32 rx_pause_frames; ++ u32 rx_priority_pause_frames; ++ u32 rx_control_frames; ++ u32 rx_in_range_errors; ++ u32 rx_out_range_errors; ++ u32 rx_frame_too_long; ++ u32 rx_address_match_errors; ++ u32 rx_dropped_too_small; ++ u32 rx_dropped_too_short; ++ u32 rx_dropped_header_too_small; ++ u32 rx_dropped_tcp_length; ++ u32 rx_dropped_runt; ++ u32 rsvd1[10]; ++ u32 rx_ip_checksum_errs; ++ u32 rx_tcp_checksum_errs; ++ u32 rx_udp_checksum_errs; ++ u32 rsvd2[7]; ++ u32 rx_switched_unicast_packets; ++ u32 rx_switched_multicast_packets; ++ u32 rx_switched_broadcast_packets; ++ u32 rsvd3[3]; ++ u32 tx_pauseframes; ++ u32 tx_priority_pauseframes; ++ u32 tx_controlframes; ++ u32 rsvd4[10]; ++ u32 rxpp_fifo_overflow_drop; ++ u32 
rx_input_fifo_overflow_drop; ++ u32 pmem_fifo_overflow_drop; ++ u32 jabber_events; ++ u32 rsvd5[3]; ++}; ++ ++ ++struct be_rxf_stats_v1 { ++ struct be_port_rxf_stats_v1 port[4]; ++ u32 rsvd0[2]; ++ u32 rx_drops_no_pbuf; ++ u32 rx_drops_no_txpb; ++ u32 rx_drops_no_erx_descr; ++ u32 rx_drops_no_tpre_descr; ++ u32 rsvd1[6]; ++ u32 rx_drops_too_many_frags; ++ u32 rx_drops_invalid_ring; ++ u32 forwarded_packets; ++ u32 rx_drops_mtu; ++ u32 rsvd2[14]; ++}; ++ ++struct be_erx_stats_v1 { ++ u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/ ++ u32 rsvd[4]; ++}; ++ ++struct be_hw_stats_v1 { ++ struct be_rxf_stats_v1 rxf; ++ u32 rsvd0[BE_TXP_SW_SZ]; ++ struct be_erx_stats_v1 erx; ++ struct be_pmem_stats pmem; ++ u32 rsvd1[3]; ++}; ++ ++struct be_cmd_req_get_stats_v1 { ++ struct be_cmd_req_hdr hdr; ++ u8 rsvd[sizeof(struct be_hw_stats_v1)]; ++}; ++ ++struct be_cmd_resp_get_stats_v1 { ++ struct be_cmd_resp_hdr hdr; ++ struct be_hw_stats_v1 hw_stats; ++}; ++ ++static inline void * ++hw_stats_from_cmd(struct be_adapter *adapter) ++{ ++ if (adapter->generation == BE_GEN3) { ++ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va; ++ ++ return &cmd->hw_stats; ++ } else { ++ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va; ++ ++ return &cmd->hw_stats; ++ } ++} ++ ++static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter) ++{ ++ if (adapter->generation == BE_GEN3) { ++ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); ++ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf; ++ ++ return &rxf_stats->port[adapter->port_num]; ++ } else { ++ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); ++ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf; ++ ++ return &rxf_stats->port[adapter->port_num]; ++ } ++} ++ ++static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter) ++{ ++ if (adapter->generation == BE_GEN3) { ++ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); ++ ++ return &hw_stats->rxf; ++ } else { ++ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); ++ ++ return &hw_stats->rxf; ++ } ++} ++ ++static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter) ++{ ++ if (adapter->generation == BE_GEN3) { ++ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); ++ ++ return &hw_stats->erx; ++ } else { ++ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); ++ ++ return &hw_stats->erx; ++ } ++} ++ ++static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter) ++{ ++ if (adapter->generation == BE_GEN3) { ++ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); ++ ++ return &hw_stats->pmem; ++ } else { ++ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); ++ ++ return &hw_stats->pmem; ++ } ++} ++ + extern int be_pci_fnum_get(struct be_adapter *adapter); + extern int be_cmd_POST(struct be_adapter *adapter); + extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, + u8 type, bool permanent, u32 if_handle); + extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, +- u32 if_id, u32 *pmac_id); +-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); ++ u32 if_id, u32 *pmac_id, u32 domain); ++extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, ++ u32 domain); + extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, + u32 en_flags, u8 *mac, bool pmac_invalid, +- u32 *if_handle, u32 *pmac_id); +-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); ++ u32 
*if_handle, u32 *pmac_id, u32 domain); ++extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle, ++ u32 domain); + extern int be_cmd_eq_create(struct be_adapter *adapter, + struct be_queue_info *eq, int eq_delay); + extern int be_cmd_cq_create(struct be_adapter *adapter, +@@ -736,36 +1415,92 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter, + struct be_queue_info *cq); + extern int be_cmd_txq_create(struct be_adapter *adapter, + struct be_queue_info *txq, +- struct be_queue_info *cq); ++ struct be_queue_info *cq, u8 *tc_id); + extern int be_cmd_rxq_create(struct be_adapter *adapter, + struct be_queue_info *rxq, u16 cq_id, + u16 frag_size, u16 max_frame_size, u32 if_id, +- u32 rss); ++ u32 rss, u8 *rss_id); + extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, + int type); ++extern int be_cmd_rxq_destroy(struct be_adapter *adapter, ++ struct be_queue_info *q); + extern int be_cmd_link_status_query(struct be_adapter *adapter, +- bool *link_up); ++ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom); + extern int be_cmd_reset(struct be_adapter *adapter); + extern int be_cmd_get_stats(struct be_adapter *adapter, + struct be_dma_mem *nonemb_cmd); +-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver); ++extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, ++ char *fw_on_flash); + + extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd); + extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, + u16 *vtag_array, u32 num, bool untagged, + bool promiscuous); +-extern int be_cmd_promiscuous_config(struct be_adapter *adapter, +- u8 port_num, bool en); +-extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, +- struct dev_mc_list *mc_list, u32 mc_count); ++extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); + extern int be_cmd_set_flow_control(struct be_adapter *adapter, + u32 tx_fc, u32 rx_fc); + extern int be_cmd_get_flow_control(struct be_adapter *adapter, + u32 *tx_fc, u32 *rx_fc); +-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, +- u32 *port_num, u32 *cap); ++extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, ++ u32 *function_mode, u32 *functions_caps); + extern int be_cmd_reset_function(struct be_adapter *adapter); +-extern int be_process_mcc(struct be_adapter *adapter); ++extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, ++ u16 table_size); ++extern int be_process_mcc(struct be_adapter *adapter, int *status); ++extern int be_cmd_set_beacon_state(struct be_adapter *adapter, ++ u8 port_num, u8 beacon, u8 status, u8 state); ++extern int be_cmd_get_beacon_state(struct be_adapter *adapter, ++ u8 port_num, u32 *state); ++extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port, ++ u8 *connector); + extern int be_cmd_write_flashrom(struct be_adapter *adapter, + struct be_dma_mem *cmd, u32 flash_oper, + u32 flash_opcode, u32 buf_size); ++int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, ++ int offset); ++extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, ++ struct be_dma_mem *nonemb_cmd); ++extern int be_cmd_fw_init(struct be_adapter *adapter); ++extern int be_cmd_fw_clean(struct be_adapter *adapter); ++extern void be_async_mcc_enable(struct be_adapter *adapter); ++extern void be_async_mcc_disable(struct be_adapter *adapter); ++extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, ++ u32 
loopback_type, u32 pkt_size, ++ u32 num_pkts, u64 pattern); ++extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, ++ u32 byte_cnt, struct be_dma_mem *cmd); ++extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, ++ struct be_dma_mem *nonemb_cmd); ++extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, ++ u8 loopback_type, u8 enable); ++extern int be_cmd_get_phy_info(struct be_adapter *adapter, ++ struct be_phy_info *phy_info); ++extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); ++extern void be_detect_dump_ue(struct be_adapter *adapter); ++extern int be_cmd_get_die_temperature(struct be_adapter *adapter); ++extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); ++extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); ++extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); ++extern int be_cmd_req_native_mode(struct be_adapter *adapter); ++extern int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name); ++extern int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name); ++extern int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs); ++ ++extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, ++ u32 *privilege, u32 domain); ++extern int be_cmd_set_fn_privileges(struct be_adapter *adapter, ++ u32 mask, u32 *prev, u32 domain); ++extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, ++ u32 domain, u16 intf_id); ++extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, ++ u32 domain, u16 intf_id); ++extern int be_cmd_set_port_speed_v1(struct be_adapter *adapter, u8 port_num, ++ u16 mac_speed, u16 dac_cable_len); ++extern int be_cmd_get_port_speed(struct be_adapter *adapter, u8 port_num, ++ u16 *dac_cable_len, u16 *port_speed); ++#ifdef CONFIG_PALAU ++int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma, ++ int req_size, void *va); ++#endif ++ ++#endif /* !BE_CMDS_H */ +diff --git a/drivers/net/benet/be_compat.c b/drivers/net/benet/be_compat.c +new file mode 100644 +index 0000000..bdd1dba +--- /dev/null ++++ b/drivers/net/benet/be_compat.c +@@ -0,0 +1,630 @@ ++/* ++ * Copyright (C) 2005 - 2011 Emulex ++ * All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License version 2 ++ * as published by the Free Software Foundation. The full GNU General ++ * Public License is included in this distribution in the file called COPYING. 
++ * ++ * Contact Information: ++ * linux-drivers@emulex.com ++ * ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 ++ */ ++ ++#include "be.h" ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) ++void be_netdev_ops_init(struct net_device *netdev, struct net_device_ops *ops) ++{ ++ netdev->open = ops->ndo_open; ++ netdev->stop = ops->ndo_stop; ++ netdev->hard_start_xmit = ops->ndo_start_xmit; ++ netdev->set_mac_address = ops->ndo_set_mac_address; ++ netdev->get_stats = ops->ndo_get_stats; ++ netdev->set_multicast_list = ops->ndo_set_rx_mode; ++ netdev->change_mtu = ops->ndo_change_mtu; ++ netdev->vlan_rx_register = ops->ndo_vlan_rx_register; ++ netdev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid; ++ netdev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid; ++ netdev->do_ioctl = ops->ndo_do_ioctl; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ netdev->poll_controller = ops->ndo_poll_controller; ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) ++ netdev->select_queue = ops->ndo_select_queue; ++#endif ++} ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) ++int eth_validate_addr(struct net_device *netdev) ++{ ++ return 0; ++} ++#endif ++ ++/* New NAPI backport */ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24) ++ ++int be_poll_compat(struct net_device *netdev, int *budget) ++{ ++ struct napi_struct *napi = netdev->priv; ++ u32 work_done, can_do; ++ ++ can_do = min(*budget, netdev->quota); ++ work_done = napi->poll(napi, can_do); ++ ++ *budget -= work_done; ++ netdev->quota -= work_done; ++ if (napi->rx) ++ return (work_done >= can_do); ++ return 0; ++} ++ ++ ++#endif /* New NAPI backport */ ++ ++int be_netif_napi_add(struct net_device *netdev, ++ struct napi_struct *napi, ++ int (*poll) (struct napi_struct *, int), int weight) ++{ ++#ifdef HAVE_SIMULATED_MULTI_NAPI ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct net_device *nd; ++ ++ nd = alloc_netdev(0, "", ether_setup); ++ if (!nd) ++ return -ENOMEM; ++ nd->priv = napi; ++ nd->weight = BE_NAPI_WEIGHT; ++ nd->poll = be_poll_compat; ++ set_bit(__LINK_STATE_START, &nd->state); ++ ++ if (napi == &adapter->rx_obj[0].rx_eq.napi) ++ napi->rx = true; ++ napi->poll = poll; ++ napi->dev = nd; ++#ifdef RHEL_NEW_NAPI ++ napi->napi.dev = netdev; ++#endif ++ return 0; ++#else ++ netif_napi_add(netdev, napi, poll, weight); ++ return 0; ++#endif ++} ++void be_netif_napi_del(struct net_device *netdev) ++{ ++#ifdef HAVE_SIMULATED_MULTI_NAPI ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct napi_struct *napi; ++ struct be_rx_obj *rxo; ++ int i; ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ napi = &rxo->rx_eq.napi; ++ if (napi->dev) { ++ free_netdev(napi->dev); ++ napi->dev = NULL; ++ } ++ } ++ ++ napi = &adapter->tx_eq.napi; ++ if (napi->dev) { ++ free_netdev(napi->dev); ++ napi->dev = NULL; ++ } ++#endif ++} ++/* INET_LRO backport */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) ++ ++#define TCP_HDR_LEN(tcph) (tcph->doff << 2) ++#define IP_HDR_LEN(iph) (iph->ihl << 2) ++#define TCP_PAYLOAD_LENGTH(iph, tcph) (ntohs(iph->tot_len) - IP_HDR_LEN(iph) \ ++ - TCP_HDR_LEN(tcph)) ++ ++#define IPH_LEN_WO_OPTIONS 5 ++#define TCPH_LEN_WO_OPTIONS 5 ++#define TCPH_LEN_W_TIMESTAMP 8 ++ ++#define LRO_MAX_PG_HLEN 64 ++#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; } ++/* ++ * Basic tcp checks whether packet is suitable for LRO ++ */ ++static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph, ++ int len, struct net_lro_desc *lro_desc) ++{ ++ /* check ip header: don't aggregate padded 
frames */ ++ if (ntohs(iph->tot_len) != len) ++ return -1; ++ ++ if (iph->ihl != IPH_LEN_WO_OPTIONS) ++ return -1; ++ ++ if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack ++ || tcph->rst || tcph->syn || tcph->fin) ++ return -1; ++ ++ if (INET_ECN_is_ce(ipv4_get_dsfield(iph))) ++ return -1; ++ ++ if (tcph->doff != TCPH_LEN_WO_OPTIONS ++ && tcph->doff != TCPH_LEN_W_TIMESTAMP) ++ return -1; ++ ++ /* check tcp options (only timestamp allowed) */ ++ if (tcph->doff == TCPH_LEN_W_TIMESTAMP) { ++ u32 *topt = (u32 *)(tcph + 1); ++ ++ if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) ++ | (TCPOPT_TIMESTAMP << 8) ++ | TCPOLEN_TIMESTAMP)) ++ return -1; ++ ++ /* timestamp should be in right order */ ++ topt++; ++ if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval), ++ ntohl(*topt))) ++ return -1; ++ ++ /* timestamp reply should not be zero */ ++ topt++; ++ if (*topt == 0) ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc) ++{ ++ struct iphdr *iph = lro_desc->iph; ++ struct tcphdr *tcph = lro_desc->tcph; ++ u32 *p; ++ __wsum tcp_hdr_csum; ++ ++ tcph->ack_seq = lro_desc->tcp_ack; ++ tcph->window = lro_desc->tcp_window; ++ ++ if (lro_desc->tcp_saw_tstamp) { ++ p = (u32 *)(tcph + 1); ++ *(p+2) = lro_desc->tcp_rcv_tsecr; ++ } ++ ++ iph->tot_len = htons(lro_desc->ip_tot_len); ++ ++ iph->check = 0; ++ iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl); ++ ++ tcph->check = 0; ++ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0); ++ lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum); ++ tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, ++ lro_desc->ip_tot_len - ++ IP_HDR_LEN(iph), IPPROTO_TCP, ++ lro_desc->data_csum); ++} ++ ++static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len) ++{ ++ __wsum tcp_csum; ++ __wsum tcp_hdr_csum; ++ __wsum tcp_ps_hdr_csum; ++ ++ tcp_csum = ~csum_unfold(tcph->check); ++ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum); ++ ++ tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, ++ len + TCP_HDR_LEN(tcph), ++ IPPROTO_TCP, 0); ++ ++ return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum), ++ tcp_ps_hdr_csum); ++} ++ ++static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb, ++ struct iphdr *iph, struct tcphdr *tcph, ++ u16 vlan_tag, struct vlan_group *vgrp) ++{ ++ int nr_frags; ++ u32 *ptr; ++ u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph); ++ ++ nr_frags = skb_shinfo(skb)->nr_frags; ++ lro_desc->parent = skb; ++ lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]); ++ lro_desc->iph = iph; ++ lro_desc->tcph = tcph; ++ lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len; ++ lro_desc->tcp_ack = ntohl(tcph->ack_seq); ++ lro_desc->tcp_window = tcph->window; ++ ++ lro_desc->pkt_aggr_cnt = 1; ++ lro_desc->ip_tot_len = ntohs(iph->tot_len); ++ ++ if (tcph->doff == 8) { ++ ptr = (u32 *)(tcph+1); ++ lro_desc->tcp_saw_tstamp = 1; ++ lro_desc->tcp_rcv_tsval = *(ptr+1); ++ lro_desc->tcp_rcv_tsecr = *(ptr+2); ++ } ++ ++ lro_desc->mss = tcp_data_len; ++ lro_desc->vgrp = vgrp; ++ lro_desc->vlan_tag = vlan_tag; ++ lro_desc->active = 1; ++ ++ if (tcp_data_len) ++ lro_desc->data_csum = lro_tcp_data_csum(iph, tcph, ++ tcp_data_len); ++ ++ if (!tcp_data_len) ++ lro_desc->ack_cnt++; ++} ++ ++static inline void lro_clear_desc(struct net_lro_desc *lro_desc) ++{ ++ memset(lro_desc, 0, sizeof(struct net_lro_desc)); ++} ++ ++static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph, ++ struct tcphdr 
*tcph, int tcp_data_len) ++{ ++ struct sk_buff *parent = lro_desc->parent; ++ u32 *topt; ++ ++ lro_desc->pkt_aggr_cnt++; ++ lro_desc->ip_tot_len += tcp_data_len; ++ lro_desc->tcp_next_seq += tcp_data_len; ++ lro_desc->tcp_window = tcph->window; ++ lro_desc->tcp_ack = tcph->ack_seq; ++ ++ /* don't update tcp_rcv_tsval, would not work with PAWS */ ++ if (lro_desc->tcp_saw_tstamp) { ++ topt = (u32 *) (tcph + 1); ++ lro_desc->tcp_rcv_tsecr = *(topt + 2); ++ } ++ ++ if (tcp_data_len) ++ lro_desc->data_csum = csum_block_add(lro_desc->data_csum, ++ lro_tcp_data_csum(iph, tcph, ++ tcp_data_len), ++ parent->len); ++ ++ parent->len += tcp_data_len; ++ parent->data_len += tcp_data_len; ++ if (tcp_data_len > lro_desc->mss) ++ lro_desc->mss = tcp_data_len; ++} ++ ++static void lro_add_frags(struct net_lro_desc *lro_desc, ++ int len, int hlen, int truesize, ++ struct skb_frag_struct *skb_frags, ++ struct iphdr *iph, struct tcphdr *tcph) ++{ ++ struct sk_buff *skb = lro_desc->parent; ++ int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph); ++ ++ lro_add_common(lro_desc, iph, tcph, tcp_data_len); ++ ++ skb->truesize += truesize; ++ ++ if (!tcp_data_len) { ++ put_page(skb_frags[0].page); ++ lro_desc->ack_cnt++; ++ return; ++ } ++ ++ skb_frags[0].page_offset += hlen; ++ skb_frags[0].size -= hlen; ++ ++ while (tcp_data_len > 0) { ++ *(lro_desc->next_frag) = *skb_frags; ++ tcp_data_len -= skb_frags->size; ++ lro_desc->next_frag++; ++ skb_frags++; ++ skb_shinfo(skb)->nr_frags++; ++ } ++} ++ ++static int lro_check_tcp_conn(struct net_lro_desc *lro_desc, ++ struct iphdr *iph, ++ struct tcphdr *tcph) ++{ ++ if ((lro_desc->iph->saddr != iph->saddr) ++ || (lro_desc->iph->daddr != iph->daddr) ++ || (lro_desc->tcph->source != tcph->source) ++ || (lro_desc->tcph->dest != tcph->dest)) ++ return -1; ++ return 0; ++} ++ ++static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr, ++ struct net_lro_desc *lro_arr, ++ struct iphdr *iph, ++ struct tcphdr *tcph) ++{ ++ struct net_lro_desc *lro_desc = NULL; ++ struct net_lro_desc *tmp; ++ int max_desc = lro_mgr->max_desc; ++ int i; ++ ++ for (i = 0; i < max_desc; i++) { ++ tmp = &lro_arr[i]; ++ if (tmp->active) ++ if (!lro_check_tcp_conn(tmp, iph, tcph)) { ++ lro_desc = tmp; ++ goto out; ++ } ++ } ++ ++ for (i = 0; i < max_desc; i++) { ++ if (!lro_arr[i].active) { ++ lro_desc = &lro_arr[i]; ++ goto out; ++ } ++ } ++ ++ LRO_INC_STATS(lro_mgr, no_desc); ++out: ++ return lro_desc; ++} ++ ++static void lro_flush(struct net_lro_mgr *lro_mgr, ++ struct net_lro_desc *lro_desc) ++{ ++ struct be_adapter *adapter = netdev_priv(lro_mgr->dev); ++ ++ if (lro_desc->pkt_aggr_cnt > 1) ++ lro_update_tcp_ip_header(lro_desc); ++ ++ skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss; ++ ++ if (lro_desc->vgrp) { ++ if (test_bit(LRO_F_NAPI, &lro_mgr->features)) ++ vlan_hwaccel_receive_skb(lro_desc->parent, ++ lro_desc->vgrp, ++ lro_desc->vlan_tag); ++ else ++ vlan_hwaccel_rx(lro_desc->parent, ++ lro_desc->vgrp, ++ lro_desc->vlan_tag); ++ ++ } else { ++ if (test_bit(LRO_F_NAPI, &lro_mgr->features)) ++ netif_receive_skb(lro_desc->parent); ++ else ++ netif_rx(lro_desc->parent); ++ } ++ ++ LRO_INC_STATS(lro_mgr, flushed); ++ lro_clear_desc(lro_desc); ++} ++ ++static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr, ++ struct skb_frag_struct *frags, ++ int len, int true_size, ++ void *mac_hdr, ++ int hlen, __wsum sum, ++ u32 ip_summed) ++{ ++ struct sk_buff *skb; ++ struct skb_frag_struct *skb_frags; ++ int data_len = len; ++ int hdr_len = min(len, hlen); ++ ++ skb = 
netdev_alloc_skb(lro_mgr->dev, hlen); ++ if (!skb) ++ return NULL; ++ ++ skb->len = len; ++ skb->data_len = len - hdr_len; ++ skb->truesize += true_size; ++ skb->tail += hdr_len; ++ ++ memcpy(skb->data, mac_hdr, hdr_len); ++ ++ if (skb->data_len) { ++ skb_frags = skb_shinfo(skb)->frags; ++ while (data_len > 0) { ++ *skb_frags = *frags; ++ data_len -= frags->size; ++ skb_frags++; ++ frags++; ++ skb_shinfo(skb)->nr_frags++; ++ } ++ skb_shinfo(skb)->frags[0].page_offset += hdr_len; ++ skb_shinfo(skb)->frags[0].size -= hdr_len; ++ } else { ++ put_page(frags[0].page); ++ } ++ ++ ++ skb->ip_summed = ip_summed; ++ skb->csum = sum; ++ skb->protocol = eth_type_trans(skb, lro_mgr->dev); ++ return skb; ++} ++ ++static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr, ++ struct skb_frag_struct *frags, ++ int len, int true_size, ++ struct vlan_group *vgrp, ++ u16 vlan_tag, void *priv, __wsum sum) ++{ ++ struct net_lro_desc *lro_desc; ++ struct iphdr *iph; ++ struct tcphdr *tcph; ++ struct sk_buff *skb; ++ u64 flags; ++ void *mac_hdr; ++ int mac_hdr_len; ++ int hdr_len = LRO_MAX_PG_HLEN; ++ int vlan_hdr_len = 0; ++ u8 pad_bytes; ++ ++ if (!lro_mgr->get_frag_header ++ || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph, ++ (void *)&tcph, &flags, priv)) { ++ mac_hdr = page_address(frags->page) + frags->page_offset; ++ goto out1; ++ } ++ ++ if (!(flags & LRO_IPV4) || !(flags & LRO_TCP)) ++ goto out1; ++ ++ hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr); ++ mac_hdr_len = (int)((void *)(iph) - mac_hdr); ++ ++ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph); ++ if (!lro_desc) ++ goto out1; ++ ++ pad_bytes = len - (ntohs(iph->tot_len) + mac_hdr_len); ++ if (!TCP_PAYLOAD_LENGTH(iph, tcph) && pad_bytes) { ++ len -= pad_bytes; /* trim the packet */ ++ frags[0].size -= pad_bytes; ++ true_size -= pad_bytes; ++ } ++ ++ if (!lro_desc->active) { /* start new lro session */ ++ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL)) ++ goto out1; ++ ++ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr, ++ hdr_len, 0, lro_mgr->ip_summed_aggr); ++ if (!skb) ++ goto out; ++ ++ if ((skb->protocol == htons(ETH_P_8021Q)) ++ && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features)) ++ vlan_hdr_len = VLAN_HLEN; ++ ++ iph = (void *)(skb->data + vlan_hdr_len); ++ tcph = (void *)((u8 *)skb->data + vlan_hdr_len ++ + IP_HDR_LEN(iph)); ++ ++ lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp); ++ LRO_INC_STATS(lro_mgr, aggregated); ++ return 0; ++ } ++ ++ if (lro_desc->tcp_next_seq != ntohl(tcph->seq)) ++ goto out2; ++ ++ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc)) ++ goto out2; ++ ++ lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph); ++ LRO_INC_STATS(lro_mgr, aggregated); ++ ++ if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) || ++ lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu)) ++ lro_flush(lro_mgr, lro_desc); ++ ++ return NULL; ++ ++out2: /* send aggregated packets to the stack */ ++ lro_flush(lro_mgr, lro_desc); ++ ++out1: /* Original packet has to be posted to the stack */ ++ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr, ++ hdr_len, sum, lro_mgr->ip_summed); ++out: ++ return skb; ++} ++ ++void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr, ++ struct skb_frag_struct *frags, ++ int len, int true_size, void *priv, __wsum sum) ++{ ++ struct sk_buff *skb; ++ ++ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0, ++ priv, sum); ++ if (!skb) ++ return; ++ ++ if 
(test_bit(LRO_F_NAPI, &lro_mgr->features)) ++ netif_receive_skb(skb); ++ else ++ netif_rx(skb); ++} ++ ++void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr, ++ struct skb_frag_struct *frags, ++ int len, int true_size, ++ struct vlan_group *vgrp, ++ u16 vlan_tag, void *priv, __wsum sum) ++{ ++ struct sk_buff *skb; ++ ++ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp, ++ vlan_tag, priv, sum); ++ if (!skb) ++ return; ++ ++ if (test_bit(LRO_F_NAPI, &lro_mgr->features)) ++ vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag); ++ else ++ vlan_hwaccel_rx(skb, vgrp, vlan_tag); ++} ++ ++void lro_flush_all_compat(struct net_lro_mgr *lro_mgr) ++{ ++ int i; ++ struct net_lro_desc *lro_desc = lro_mgr->lro_arr; ++ ++ for (i = 0; i < lro_mgr->max_desc; i++) { ++ if (lro_desc[i].active) ++ lro_flush(lro_mgr, &lro_desc[i]); ++ } ++} ++#endif /* INET_LRO backport */ ++ ++#ifndef TX_MQ ++struct net_device *alloc_etherdev_mq_compat(int sizeof_priv, ++ unsigned int queue_count) ++{ ++ return alloc_etherdev(sizeof_priv); ++} ++ ++void netif_wake_subqueue_compat(struct net_device *dev, u16 queue_index) ++{ ++ netif_wake_queue(dev); ++} ++ ++void netif_stop_subqueue_compat(struct net_device *dev, u16 queue_index) ++{ ++ netif_stop_queue(dev); ++} ++ ++int __netif_subqueue_stopped_compat(const struct net_device *dev, ++ u16 queue_index) ++{ ++ return netif_queue_stopped(dev); ++} ++ ++u16 skb_get_queue_mapping_compat(const struct sk_buff *skb) ++{ ++ return 0; ++} ++ ++void netif_set_real_num_tx_queues_compat(struct net_device *dev, ++ unsigned int txq) ++{ ++ return; ++} ++ ++u16 skb_tx_hash_compat(const struct net_device *dev, ++ const struct sk_buff *skb) ++{ ++ return 0; ++} ++#endif +diff --git a/drivers/net/benet/be_compat.h b/drivers/net/benet/be_compat.h +new file mode 100644 +index 0000000..8ceecc8 +--- /dev/null ++++ b/drivers/net/benet/be_compat.h +@@ -0,0 +1,621 @@ ++/* ++ * Copyright (C) 2005 - 2011 Emulex ++ * All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License version 2 ++ * as published by the Free Software Foundation. The full GNU General ++ * Public License is included in this distribution in the file called COPYING. 
++ * ++ * Contact Information: ++ * linux-drivers@emulex.com ++ * ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 ++ */ ++ ++#ifndef BE_COMPAT_H ++#define BE_COMPAT_H ++ ++/****************** RHEL5 and SLES10 backport ***************************/ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) ++ ++#ifndef upper_32_bits ++#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) ++#endif ++ ++#ifndef CHECKSUM_PARTIAL ++#define CHECKSUM_PARTIAL CHECKSUM_HW ++#define CHECKSUM_COMPLETE CHECKSUM_HW ++#endif ++ ++#if !defined(ip_hdr) ++#define ip_hdr(skb) (skb->nh.iph) ++#define ipv6_hdr(skb) (skb->nh.ipv6h) ++#endif ++ ++#if !defined(__packed) ++#define __packed __attribute__ ((packed)) ++#endif ++ ++#if !defined(RHEL_MINOR) ++/* Only for RH5U1 (Maui) and SLES10 NIC driver */ ++enum { ++ false = 0, ++ true = 1 ++}; ++#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) ++/* Only for RH5U1 (Maui) NIC driver */ ++static inline __attribute__((const)) ++int __ilog2_u32(u32 n) ++{ ++ return fls(n) - 1; ++} ++#endif ++#endif ++ ++#define ETH_FCS_LEN 4 ++#define bool u8 ++#ifndef PTR_ALIGN ++#define PTR_ALIGN(p, a) ((typeof(p)) \ ++ ALIGN((unsigned long)(p), (a))) ++#endif ++#define list_first_entry(ptr, type, member) \ ++ list_entry((ptr)->next, type, member) ++ ++#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \ ++ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16) ++#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] \ ++ __devinitdata ++#endif ++ ++/* Backport of request_irq */ ++typedef irqreturn_t(*backport_irq_handler_t) (int, void *); ++static inline int ++backport_request_irq(unsigned int irq, irqreturn_t(*handler) (int, void *), ++ unsigned long flags, const char *dev_name, void *dev_id) ++{ ++ return request_irq(irq, ++ (irqreturn_t(*) (int, void *, struct pt_regs *))handler, ++ flags, dev_name, dev_id); ++} ++#define request_irq backport_request_irq ++ ++#endif /*** RHEL5 and SLES10 backport ***/ ++ ++#if !defined(__packed) ++#define __packed __attribute__ ((packed)) ++#endif ++ ++/****************** SLES10 only backport ***************************/ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) ++ ++#include <linux/tifm.h> ++ ++#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f)) ++#define IRQF_SHARED SA_SHIRQ ++#define CHECKSUM_PARTIAL CHECKSUM_HW ++#define CHECKSUM_COMPLETE CHECKSUM_HW ++#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) ++#define NETIF_F_IPV6_CSUM NETIF_F_IP_CSUM ++#define NETIF_F_TSO6 NETIF_F_TSO ++ ++ ++static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, ++ unsigned int length) ++{ ++ /* 16 == NET_PAD_SKB */ ++ struct sk_buff *skb; ++ skb = alloc_skb(length + 16, GFP_ATOMIC); ++ if (likely(skb != NULL)) { ++ skb_reserve(skb, 16); ++ skb->dev = dev; ++ } ++ return skb; ++} ++ ++#define PCI_SAVE_STATE(x) ++ ++#else /* SLES10 only backport */ ++ ++#define PCI_SAVE_STATE(x) pci_save_state(x) ++ ++#endif /* SLES10 only backport */ ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31) ++#define netdev_tx_t int ++#endif ++ ++#ifndef VLAN_PRIO_MASK ++#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ ++#define VLAN_PRIO_SHIFT 13 ++#endif ++ ++/* ++ * Backport of netdev ops struct ++ */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) ++struct net_device_ops { ++ int (*ndo_init)(struct net_device *dev); ++ void (*ndo_uninit)(struct net_device *dev); ++ int (*ndo_open)(struct net_device *dev); ++ int (*ndo_stop)(struct net_device *dev); ++ int (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev); ++ u16 
(*ndo_select_queue)(struct net_device *dev, ++ struct sk_buff *skb); ++ void (*ndo_change_rx_flags)(struct net_device *dev, int flags); ++ void (*ndo_set_rx_mode)(struct net_device *dev); ++ void (*ndo_set_multicast_list)(struct net_device *dev); ++ int (*ndo_set_mac_address)(struct net_device *dev, void *addr); ++ int (*ndo_validate_addr)(struct net_device *dev); ++ int (*ndo_do_ioctl)(struct net_device *dev, ++ struct ifreq *ifr, int cmd); ++ int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); ++ int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); ++ int (*ndo_neigh_setup)(struct net_device *dev, ++ struct neigh_parms *); ++ void (*ndo_tx_timeout) (struct net_device *dev); ++ ++ struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); ++ ++ void (*ndo_vlan_rx_register)(struct net_device *dev, ++ struct vlan_group *grp); ++ void (*ndo_vlan_rx_add_vid)(struct net_device *dev, ++ unsigned short vid); ++ void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, ++ unsigned short vid); ++#ifdef CONFIG_NET_POLL_CONTROLLER ++#define HAVE_NETDEV_POLL ++ void (*ndo_poll_controller)(struct net_device *dev); ++#endif ++}; ++extern void be_netdev_ops_init(struct net_device *netdev, ++ struct net_device_ops *ops); ++extern int eth_validate_addr(struct net_device *); ++ ++#endif /* Netdev ops backport */ ++ ++#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 29) ++#undef NETIF_F_GRO ++#endif ++ ++#ifdef NO_GRO ++#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5))) ++#undef NETIF_F_GRO ++#endif ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++#define HAVE_ETHTOOL_FLASH ++#endif ++ ++/* ++ * Backport of NAPI ++ */ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24) ++ ++#if defined(RHEL_MINOR) && (RHEL_MINOR > 3) ++#define RHEL_NEW_NAPI ++#endif ++ ++/* We need a new struct that has some meta data beyond rhel 5.4's napi_struct ++ * to fix rhel5.4's half-baked new napi implementation. 
++ * We don't want to use rhel 5.4's broken napi_complete; so ++ * define a new be_napi_complete that executes the logic only for Rx ++ */ ++ ++#ifdef RHEL_NEW_NAPI ++#define napi_complete be_napi_complete ++typedef struct napi_struct rhel_napi_struct; ++#endif ++#define napi_struct be_napi_struct ++#define napi_gro_frags(napi) napi_gro_frags((rhel_napi_struct *) napi) ++#define vlan_gro_frags(napi, vlan_grp, vid)\ ++ vlan_gro_frags((rhel_napi_struct *) napi, vlan_grp, vid) ++#define napi_get_frags(napi) napi_get_frags((rhel_napi_struct *) napi) ++ ++struct napi_struct { ++#ifdef RHEL_NEW_NAPI ++ rhel_napi_struct napi; /* must be the first member */ ++#endif ++ struct net_device *dev; ++ int (*poll) (struct napi_struct *napi, int budget); ++ bool rx; ++}; ++ ++static inline void napi_complete(struct napi_struct *napi) ++{ ++#ifdef NETIF_F_GRO ++ napi_gro_flush((rhel_napi_struct *)napi); ++#endif ++ netif_rx_complete(napi->dev); ++} ++ ++static inline void napi_schedule(struct napi_struct *napi) ++{ ++ netif_rx_schedule(napi->dev); ++} ++ ++static inline void napi_enable(struct napi_struct *napi) ++{ ++ netif_poll_enable(napi->dev); ++} ++ ++static inline void napi_disable(struct napi_struct *napi) ++{ ++ netif_poll_disable(napi->dev); ++} ++ ++#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \ ++ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16) ++static inline void vlan_group_set_device(struct vlan_group *vg, ++ u16 vlan_id, ++ struct net_device *dev) ++{ ++ struct net_device **array; ++ if (!vg) ++ return; ++ array = vg->vlan_devices; ++ array[vlan_id] = dev; ++} ++#endif ++ ++#endif /* New NAPI backport */ ++ ++extern int be_netif_napi_add(struct net_device *netdev, ++ struct napi_struct *napi, ++ int (*poll) (struct napi_struct *, int), int weight); ++extern void be_netif_napi_del(struct net_device *netdev); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) ++#define HAVE_SIMULATED_MULTI_NAPI ++#endif ++ ++/************** Backport of Delayed work queues interface ****************/ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19) ++#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \ ++ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16) ++struct delayed_work { ++ struct work_struct work; ++}; ++#endif ++ ++#define INIT_DELAYED_WORK(_work, _func) \ ++ INIT_WORK(&(_work)->work, _func, &(_work)->work) ++ ++static inline int backport_cancel_delayed_work_sync(struct delayed_work *work) ++{ ++ cancel_rearming_delayed_work(&work->work); ++ return 0; ++} ++#define cancel_delayed_work_sync backport_cancel_delayed_work_sync ++ ++static inline int backport_schedule_delayed_work(struct delayed_work *work, ++ unsigned long delay) ++{ ++ if (unlikely(!delay)) ++ return schedule_work(&work->work); ++ else ++ return schedule_delayed_work(&work->work, delay); ++} ++#define schedule_delayed_work backport_schedule_delayed_work ++#endif /* backport delayed workqueue */ ++ ++ ++/************** Backport of INET_LRO **********************************/ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18) ++ ++#include <linux/inet_lro.h> ++ ++#else ++ ++#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) ++ ++#if defined(RHEL_MINOR) && RHEL_MINOR < 6 ++typedef __u16 __bitwise __sum16; ++typedef __u32 __bitwise __wsum; ++#endif ++ ++#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR <= 3)) || \ ++ (!defined(RHEL_MINOR))) ++static inline __wsum csum_unfold(__sum16 n) ++{ ++ return (__force __wsum)n; ++} ++#endif ++ ++#endif ++ ++#define lro_flush_all lro_flush_all_compat ++#define lro_vlan_hwaccel_receive_frags 
lro_vlan_hwaccel_receive_frags_compat ++#define lro_receive_frags lro_receive_frags_compat ++ ++struct net_lro_stats { ++ unsigned long aggregated; ++ unsigned long flushed; ++ unsigned long no_desc; ++}; ++ ++struct net_lro_desc { ++ struct sk_buff *parent; ++ struct sk_buff *last_skb; ++ struct skb_frag_struct *next_frag; ++ struct iphdr *iph; ++ struct tcphdr *tcph; ++ struct vlan_group *vgrp; ++ __wsum data_csum; ++ u32 tcp_rcv_tsecr; ++ u32 tcp_rcv_tsval; ++ u32 tcp_ack; ++ u32 tcp_next_seq; ++ u32 skb_tot_frags_len; ++ u32 ack_cnt; ++ u16 ip_tot_len; ++ u16 tcp_saw_tstamp; /* timestamps enabled */ ++ u16 tcp_window; ++ u16 vlan_tag; ++ int pkt_aggr_cnt; /* counts aggregated packets */ ++ int vlan_packet; ++ int mss; ++ int active; ++}; ++ ++struct net_lro_mgr { ++ struct net_device *dev; ++ struct net_lro_stats stats; ++ ++ /* LRO features */ ++ unsigned long features; ++#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */ ++#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted ++ from received packets and eth protocol ++ is still ETH_P_8021Q */ ++ ++ u32 ip_summed; /* Set in non generated SKBs in page mode */ ++ u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY ++ * or CHECKSUM_NONE */ ++ ++ int max_desc; /* Max number of LRO descriptors */ ++ int max_aggr; /* Max number of LRO packets to be aggregated */ ++ ++ struct net_lro_desc *lro_arr; /* Array of LRO descriptors */ ++ ++ /* Optimized driver functions ++ * get_skb_header: returns tcp and ip header for packet in SKB ++ */ ++ int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr, ++ void **tcpudp_hdr, u64 *hdr_flags, void *priv); ++ ++ /* hdr_flags: */ ++#define LRO_IPV4 1 /* ip_hdr is IPv4 header */ ++#define LRO_TCP 2 /* tcpudp_hdr is TCP header */ ++ ++ /* ++ * get_frag_header: returns mac, tcp and ip header for packet in SKB ++ * ++ * @hdr_flags: Indicate what kind of LRO has to be done ++ * (IPv4/IPv6/TCP/UDP) ++ */ ++ int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr, ++ void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, ++ void *priv); ++}; ++ ++extern void lro_receive_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb, ++ void *priv); ++ ++extern void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr, ++ struct sk_buff *skb, struct vlan_group *vgrp, ++ u16 vlan_tag, void *priv); ++ 
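A minimal orientation sketch of how a driver would use this backported LRO manager (all demo_* names are hypothetical and it assumes the surrounding driver headers; only untagged IPv4/TCP is handled here): the manager is wired up once at init, and page fragments are then fed through it from the NAPI poll loop.

static int demo_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
			void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
			void *priv)
{
	struct ethhdr *eh = page_address(frag->page) + frag->page_offset;
	struct iphdr *iph = (struct iphdr *)(eh + 1);

	/* Refuse anything that is not plain IPv4/TCP; the LRO core then
	 * passes the frame up unaggregated. */
	if (eh->h_proto != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
		return -1;

	*mac_hdr = eh;
	*ip_hdr = iph;
	*tcpudp_hdr = (u8 *)iph + (iph->ihl << 2);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

static void demo_lro_init(struct net_lro_mgr *mgr, struct net_device *netdev,
			struct net_lro_desc *desc_arr, int ndesc)
{
	mgr->dev = netdev;
	mgr->lro_arr = desc_arr;		/* one descriptor per tracked flow */
	mgr->max_desc = ndesc;
	mgr->max_aggr = 16;			/* frags per aggregated super-frame */
	mgr->features = LRO_F_NAPI;		/* deliver via netif_receive_skb() */
	mgr->ip_summed = CHECKSUM_UNNECESSARY;
	mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	mgr->get_frag_header = demo_get_frag_header;
}

In the receive path each fragment then goes through lro_receive_frags(mgr, frags, len, true_size, NULL, 0), and the poll routine ends with lro_flush_all(mgr) so that no partially aggregated frame is held across polls.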
++/* These functions aggregate fragments and generate SKBs to pass ++ * the packets to the stack. ++ * ++ * @lro_mgr: LRO manager to use ++ * @frags: Fragment to be processed. Must contain entire header in first ++ * element. ++ * @len: Length of received data ++ * @true_size: Actual size of memory the fragment is consuming ++ * @priv: Private data that may be used by driver functions ++ * (for example get_tcp_ip_hdr) ++ */ ++extern void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr, ++ struct skb_frag_struct *frags, int len, int true_size, ++ void *priv, __wsum sum); ++ ++extern void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr, ++ struct skb_frag_struct *frags, int len, int true_size, ++ struct vlan_group *vgrp, u16 vlan_tag, void *priv, ++ __wsum sum); ++ ++/* Forward all aggregated SKBs held by lro_mgr to network stack */ ++extern void lro_flush_all_compat(struct net_lro_mgr *lro_mgr); ++ ++extern void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph, ++ struct tcphdr *tcph); ++#endif /* backport of inet_lro */ ++ ++#ifndef ETHTOOL_FLASH_MAX_FILENAME ++#define ETHTOOL_FLASH_MAX_FILENAME 128 ++#endif ++ ++#if defined(CONFIG_XEN) && !defined(NETIF_F_GRO) ++#define BE_INIT_FRAGS_PER_FRAME (u32) 1 ++#else ++#define BE_INIT_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS)) ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) ++#ifdef CONFIG_PCI_IOV ++#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR == 6))) ++#undef CONFIG_PCI_IOV ++#endif ++#endif ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) ++#define dev_to_node(dev) -1 ++#endif ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) ++#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR > 6))) ++static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length) ++{ ++ struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN); ++ ++ if (NET_IP_ALIGN && skb) ++ skb_reserve(skb, NET_IP_ALIGN); ++ return skb; ++} ++#endif ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) ++#ifndef netif_set_gso_max_size ++#define netif_set_gso_max_size(netdev, size) do {} while (0) ++#endif ++#endif ++ ++#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)) ++#if defined(RHEL_MINOR) && (RHEL_MINOR <= 4) ++static inline int skb_is_gso_v6(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; ++} ++#endif ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) ++static inline int skb_is_gso_v6(const struct sk_buff *skb) ++{ ++ return (ip_hdr(skb)->version == 6); ++} ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) ++#define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6))) ++#define HAVE_SRIOV_CONFIG ++#endif ++#endif ++ ++#ifndef NETIF_F_VLAN_SG ++#define NETIF_F_VLAN_SG NETIF_F_SG ++#endif ++ ++#ifndef NETIF_F_VLAN_CSUM ++#define NETIF_F_VLAN_CSUM NETIF_F_HW_CSUM ++#endif ++ ++#ifndef NETIF_F_VLAN_TSO ++#define NETIF_F_VLAN_TSO NETIF_F_TSO ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) ++#define vlan_features features ++#endif ++ ++#ifndef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR(bus) dma_addr_t bus ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0) ++ ++#ifndef netdev_mc_count ++#define netdev_mc_count(nd) (nd->mc_count) ++#endif ++ ++#ifndef netdev_hw_addr ++#define netdev_hw_addr dev_mc_list ++#endif ++ ++#ifndef netdev_for_each_mc_addr ++#define netdev_for_each_mc_addr(ha, nd) \ ++ for (ha = (nd)->mc_list; ha; ha = ha->next) ++#endif ++ ++#define DMI_ADDR dmi_addr ++#else ++#define DMI_ADDR addr ++#endif ++ ++#ifndef VLAN_GROUP_ARRAY_LEN ++#define VLAN_GROUP_ARRAY_LEN VLAN_N_VID ++#endif ++/**************************** Multi TXQ Support ******************************/ ++ ++/* Supported only in RHEL6 and SL11.1 (barring one exception) */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++#define MQ_TX ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) ++#define alloc_etherdev_mq(sz, cnt) alloc_etherdev(sz) ++#define skb_get_queue_mapping(skb) 0 ++#define skb_tx_hash(dev, skb) 0 ++#define netif_set_real_num_tx_queues(dev, txq) do {} while(0) ++#define netif_wake_subqueue(dev, idx) netif_wake_queue(dev) ++#define netif_stop_subqueue(dev, idx) netif_stop_queue(dev) ++#define __netif_subqueue_stopped(dev, idx) netif_queue_stopped(dev) ++#endif /* < 2.6.27 */ ++ ++#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32))) ++#define skb_tx_hash(dev, skb) 0 ++#define netif_set_real_num_tx_queues(dev, txq) do {} while(0) ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++#define netif_set_real_num_tx_queues be_set_real_num_tx_queues ++static inline void be_set_real_num_tx_queues(struct net_device *dev, ++ unsigned int txq) ++{ ++ dev->real_num_tx_queues = txq; ++} ++#endif ++ ++#include <linux/if_vlan.h> ++static inline void be_reset_skb_tx_vlan(struct sk_buff *skb) ++{ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18) ++ skb->vlan_tci = 0; ++#else ++ struct vlan_skb_tx_cookie *cookie; ++ ++ cookie = VLAN_TX_SKB_CB(skb); ++ cookie->magic = 0; ++#endif ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) ++static inline void skb_set_network_header(struct sk_buff *skb, const int offset) ++{ ++ skb->nh.raw = skb->data + offset; ++} ++#endif ++ ++static inline struct sk_buff *be_vlan_put_tag(struct sk_buff *skb, ++ unsigned short vlan_tag) ++{ ++ struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_tag); ++ /* On kernel versions < 2.6.27 the __vlan_put_tag() function ++ * distorts the network layer hdr pointer in the skb which ++ * affects the detection of UDP/TCP packets down the line in ++ * wrb_fill_hdr(). This work-around sets it right. 
++ */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) ++ skb_set_network_header(new_skb, VLAN_ETH_HLEN); ++#endif ++ return new_skb; ++} ++ ++#ifndef ACCESS_ONCE ++#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++#endif ++ ++#endif /* BE_COMPAT_H */ +diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c +index f0fd95b..37bad99 100644 +--- a/drivers/net/benet/be_ethtool.c ++++ b/drivers/net/benet/be_ethtool.c +@@ -1,18 +1,18 @@ + /* +- * Copyright (C) 2005 - 2009 ServerEngines ++ * Copyright (C) 2005 - 2011 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation. The full GNU General ++ * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: +- * linux-drivers@serverengines.com ++ * linux-drivers@emulex.com + * +- * ServerEngines +- * 209 N. Fair Oaks Ave +- * Sunnyvale, CA 94085 ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 + */ + + #include "be.h" +@@ -26,21 +26,19 @@ struct be_ethtool_stat { + int offset; + }; + +-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT}; ++enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, DRVSTAT}; + #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ + offsetof(_struct, field) +-#define NETSTAT_INFO(field) #field, NETSTAT,\ ++#define NETSTAT_INFO(field) #field, NETSTAT,\ + FIELDINFO(struct net_device_stats,\ + field) +-#define DRVSTAT_INFO(field) #field, DRVSTAT,\ +- FIELDINFO(struct be_drvr_stats, field) +-#define MISCSTAT_INFO(field) #field, MISCSTAT,\ +- FIELDINFO(struct be_rxf_stats, field) +-#define PORTSTAT_INFO(field) #field, PORTSTAT,\ +- FIELDINFO(struct be_port_rxf_stats, \ ++#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\ ++ FIELDINFO(struct be_tx_stats, field) ++#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\ ++ FIELDINFO(struct be_rx_stats, field) ++#define DRVSTAT_INFO(field) #field, DRVSTAT,\ ++ FIELDINFO(struct be_drv_stats, \ + field) +-#define ERXSTAT_INFO(field) #field, ERXSTAT,\ +- FIELDINFO(struct be_erx_stats, field) + + static const struct be_ethtool_stat et_stats[] = { + {NETSTAT_INFO(rx_packets)}, +@@ -51,70 +49,131 @@ static const struct be_ethtool_stat et_stats[] = { + {NETSTAT_INFO(tx_errors)}, + {NETSTAT_INFO(rx_dropped)}, + {NETSTAT_INFO(tx_dropped)}, +- {DRVSTAT_INFO(be_tx_reqs)}, +- {DRVSTAT_INFO(be_tx_stops)}, +- {DRVSTAT_INFO(be_fwd_reqs)}, +- {DRVSTAT_INFO(be_tx_wrbs)}, +- {DRVSTAT_INFO(be_polls)}, + {DRVSTAT_INFO(be_tx_events)}, +- {DRVSTAT_INFO(be_rx_events)}, +- {DRVSTAT_INFO(be_tx_compl)}, +- {DRVSTAT_INFO(be_rx_compl)}, +- {DRVSTAT_INFO(be_ethrx_post_fail)}, +- {DRVSTAT_INFO(be_802_3_dropped_frames)}, +- {DRVSTAT_INFO(be_802_3_malformed_frames)}, +- {DRVSTAT_INFO(be_tx_rate)}, +- {DRVSTAT_INFO(be_rx_rate)}, +- {PORTSTAT_INFO(rx_unicast_frames)}, +- {PORTSTAT_INFO(rx_multicast_frames)}, +- {PORTSTAT_INFO(rx_broadcast_frames)}, +- {PORTSTAT_INFO(rx_crc_errors)}, +- {PORTSTAT_INFO(rx_alignment_symbol_errors)}, +- {PORTSTAT_INFO(rx_pause_frames)}, +- {PORTSTAT_INFO(rx_control_frames)}, +- {PORTSTAT_INFO(rx_in_range_errors)}, +- {PORTSTAT_INFO(rx_out_range_errors)}, +- {PORTSTAT_INFO(rx_frame_too_long)}, +- {PORTSTAT_INFO(rx_address_match_errors)}, +- {PORTSTAT_INFO(rx_vlan_mismatch)}, +- {PORTSTAT_INFO(rx_dropped_too_small)}, +- {PORTSTAT_INFO(rx_dropped_too_short)}, +- 
{PORTSTAT_INFO(rx_dropped_header_too_small)}, +- {PORTSTAT_INFO(rx_dropped_tcp_length)}, +- {PORTSTAT_INFO(rx_dropped_runt)}, +- {PORTSTAT_INFO(rx_fifo_overflow)}, +- {PORTSTAT_INFO(rx_input_fifo_overflow)}, +- {PORTSTAT_INFO(rx_ip_checksum_errs)}, +- {PORTSTAT_INFO(rx_tcp_checksum_errs)}, +- {PORTSTAT_INFO(rx_udp_checksum_errs)}, +- {PORTSTAT_INFO(rx_non_rss_packets)}, +- {PORTSTAT_INFO(rx_ipv4_packets)}, +- {PORTSTAT_INFO(rx_ipv6_packets)}, +- {PORTSTAT_INFO(tx_unicastframes)}, +- {PORTSTAT_INFO(tx_multicastframes)}, +- {PORTSTAT_INFO(tx_broadcastframes)}, +- {PORTSTAT_INFO(tx_pauseframes)}, +- {PORTSTAT_INFO(tx_controlframes)}, +- {MISCSTAT_INFO(rx_drops_no_pbuf)}, +- {MISCSTAT_INFO(rx_drops_no_txpb)}, +- {MISCSTAT_INFO(rx_drops_no_erx_descr)}, +- {MISCSTAT_INFO(rx_drops_no_tpre_descr)}, +- {MISCSTAT_INFO(rx_drops_too_many_frags)}, +- {MISCSTAT_INFO(rx_drops_invalid_ring)}, +- {MISCSTAT_INFO(forwarded_packets)}, +- {MISCSTAT_INFO(rx_drops_mtu)}, +- {ERXSTAT_INFO(rx_drops_no_fragments)}, ++ {DRVSTAT_INFO(rx_crc_errors)}, ++ {DRVSTAT_INFO(rx_alignment_symbol_errors)}, ++ {DRVSTAT_INFO(rx_pause_frames)}, ++ {DRVSTAT_INFO(rx_control_frames)}, ++ {DRVSTAT_INFO(rx_in_range_errors)}, ++ {DRVSTAT_INFO(rx_out_range_errors)}, ++ {DRVSTAT_INFO(rx_frame_too_long)}, ++ {DRVSTAT_INFO(rx_address_match_errors)}, ++ {DRVSTAT_INFO(rx_dropped_too_small)}, ++ {DRVSTAT_INFO(rx_dropped_too_short)}, ++ {DRVSTAT_INFO(rx_dropped_header_too_small)}, ++ {DRVSTAT_INFO(rx_dropped_tcp_length)}, ++ {DRVSTAT_INFO(rx_dropped_runt)}, ++ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)}, ++ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)}, ++ {DRVSTAT_INFO(rx_ip_checksum_errs)}, ++ {DRVSTAT_INFO(rx_tcp_checksum_errs)}, ++ {DRVSTAT_INFO(rx_udp_checksum_errs)}, ++ {DRVSTAT_INFO(rx_switched_unicast_packets)}, ++ {DRVSTAT_INFO(rx_switched_multicast_packets)}, ++ {DRVSTAT_INFO(rx_switched_broadcast_packets)}, ++ {DRVSTAT_INFO(tx_pauseframes)}, ++ {DRVSTAT_INFO(tx_controlframes)}, ++ {DRVSTAT_INFO(rx_priority_pause_frames)}, ++ {DRVSTAT_INFO(pmem_fifo_overflow_drop)}, ++ {DRVSTAT_INFO(jabber_events)}, ++ {DRVSTAT_INFO(rx_drops_no_pbuf)}, ++ {DRVSTAT_INFO(rx_drops_no_txpb)}, ++ {DRVSTAT_INFO(rx_drops_no_erx_descr)}, ++ {DRVSTAT_INFO(rx_drops_no_tpre_descr)}, ++ {DRVSTAT_INFO(rx_drops_too_many_frags)}, ++ {DRVSTAT_INFO(rx_drops_invalid_ring)}, ++ {DRVSTAT_INFO(forwarded_packets)}, ++ {DRVSTAT_INFO(rx_drops_mtu)}, ++ {DRVSTAT_INFO(eth_red_drops)}, ++ {DRVSTAT_INFO(be_on_die_temperature)} + }; + #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) + ++/* Stats related to multi RX queues */ ++static const struct be_ethtool_stat et_rx_stats[] = { ++ {DRVSTAT_RX_INFO(rx_bytes)}, ++ {DRVSTAT_RX_INFO(rx_pkts)}, ++ {DRVSTAT_RX_INFO(rx_rate)}, ++ {DRVSTAT_RX_INFO(rx_polls)}, ++ {DRVSTAT_RX_INFO(rx_events)}, ++ {DRVSTAT_RX_INFO(rx_compl)}, ++ {DRVSTAT_RX_INFO(rx_mcast_pkts)}, ++ {DRVSTAT_RX_INFO(rx_post_fail)}, ++ {DRVSTAT_RX_INFO(rx_drops_no_frags)} ++}; ++#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats)) ++ ++/* Stats related to multi TX queues */ ++static const struct be_ethtool_stat et_tx_stats[] = { ++ {DRVSTAT_TX_INFO(be_tx_rate)}, ++ {DRVSTAT_TX_INFO(be_tx_reqs)}, ++ {DRVSTAT_TX_INFO(be_tx_wrbs)}, ++ {DRVSTAT_TX_INFO(be_tx_stops)}, ++ {DRVSTAT_TX_INFO(be_tx_compl)}, ++ {DRVSTAT_TX_INFO(be_ipv6_ext_hdr_tx_drop)} ++}; ++#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) ++ ++static const char et_self_tests[][ETH_GSTRING_LEN] = { ++ "MAC Loopback test", ++ "PHY Loopback test", ++ "External Loopback test", ++ "DDR DMA test", ++ "Link test" ++}; ++ 
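The et_stats/et_rx_stats/et_tx_stats tables above pair a printable counter name with the sizeof/offsetof of a struct field, so one generic loop can export every counter without per-field code. A standalone sketch of that pattern, assuming nothing from the driver (all demo_* names are hypothetical):

#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint32_t rx_pkts;	/* 32-bit hardware counter */
	uint64_t rx_bytes;	/* 64-bit software counter */
};

struct demo_stat_desc {
	const char *name;
	size_t size;		/* sizeof() of the field */
	size_t offset;		/* offsetof() of the field */
};

#define DEMO_STAT_INFO(field) \
	{ #field, sizeof(((struct demo_stats *)0)->field), \
	  offsetof(struct demo_stats, field) }

static const struct demo_stat_desc demo_descs[] = {
	DEMO_STAT_INFO(rx_pkts),
	DEMO_STAT_INFO(rx_bytes),
};

/* Widen every described field into the u64 output array, the same way
 * be_get_ethtool_stats() walks et_stats with a byte-offset cast. */
static void demo_fill_stats(const struct demo_stats *s, uint64_t *data)
{
	size_t i;

	for (i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++) {
		const uint8_t *p = (const uint8_t *)s + demo_descs[i].offset;

		data[i] = (demo_descs[i].size == sizeof(uint64_t)) ?
			  *(const uint64_t *)p : *(const uint32_t *)p;
	}
}

With this layout, adding a counter costs exactly one table entry; the copy loop and the ethtool string list need no changes.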
++#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests) ++#define BE_MAC_LOOPBACK 0x0 ++#define BE_PHY_LOOPBACK 0x1 ++#define BE_ONE_PORT_EXT_LOOPBACK 0x2 ++#define BE_NO_LOOPBACK 0xff ++ ++/* MAC speed valid values */ ++#define SPEED_DEFAULT 0x0 ++#define SPEED_FORCED_10GB 0x1 ++#define SPEED_FORCED_1GB 0x2 ++#define SPEED_AUTONEG_10GB 0x3 ++#define SPEED_AUTONEG_1GB 0x4 ++#define SPEED_AUTONEG_100MB 0x5 ++#define SPEED_AUTONEG_10GB_1GB 0x6 ++#define SPEED_AUTONEG_10GB_1GB_100MB 0x7 ++#define SPEED_AUTONEG_1GB_100MB 0x8 ++#define SPEED_AUTONEG_10MB 0x9 ++#define SPEED_AUTONEG_1GB_100MB_10MB 0xa ++#define SPEED_AUTONEG_100MB_10MB 0xb ++#define SPEED_FORCED_100MB 0xc ++#define SPEED_FORCED_10MB 0xd ++ ++ ++ + static void + be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) + { + struct be_adapter *adapter = netdev_priv(netdev); ++ int len; ++ char fw_on_flash[FW_VER_LEN]; ++ ++ memset(fw_on_flash, 0 , sizeof(fw_on_flash)); ++ ++ be_cmd_get_fw_ver(adapter, adapter->fw_ver, ++ fw_on_flash); + + strcpy(drvinfo->driver, DRV_NAME); + strcpy(drvinfo->version, DRV_VER); ++ + strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN); ++ if (memcmp(adapter->fw_ver, fw_on_flash, ++ FW_VER_LEN) != 0) { ++ len = strlen(drvinfo->fw_version); ++ strncpy(drvinfo->fw_version+len, " [", ++ FW_VER_LEN-len-1); ++ len = strlen(drvinfo->fw_version); ++ strncpy(drvinfo->fw_version+len, fw_on_flash, ++ FW_VER_LEN-len-1); ++ len = strlen(drvinfo->fw_version); ++ strncpy(drvinfo->fw_version+len, "]", FW_VER_LEN-len-1); ++ } ++ + strcpy(drvinfo->bus_info, pci_name(adapter->pdev)); + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; +@@ -122,12 +181,37 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) + } + + static int ++be_get_reg_len(struct net_device *netdev) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ u32 log_size = 0; ++ ++ if (be_physfn(adapter)) ++ be_cmd_get_reg_len(adapter, &log_size); ++ ++ return log_size; ++} ++ ++static void ++be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ ++ if (be_physfn(adapter)) { ++ memset(buf, 0, regs->len); ++ be_cmd_get_regs(adapter, regs->len, buf); ++ } ++} ++ ++static int + be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) + { + struct be_adapter *adapter = netdev_priv(netdev); +- struct be_eq_obj *rx_eq = &adapter->rx_eq; ++ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + ++ coalesce->rx_max_coalesced_frames = adapter->max_rx_coal; ++ + coalesce->rx_coalesce_usecs = rx_eq->cur_eqd; + coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd; + coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd; +@@ -149,25 +233,52 @@ static int + be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) + { + struct be_adapter *adapter = netdev_priv(netdev); +- struct be_eq_obj *rx_eq = &adapter->rx_eq; ++ struct be_rx_obj *rxo; ++ struct be_eq_obj *rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + u32 tx_max, tx_min, tx_cur; + u32 rx_max, rx_min, rx_cur; +- int status = 0; ++ int status = 0, i; + + if (coalesce->use_adaptive_tx_coalesce == 1) + return -EINVAL; ++ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; ++ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME) ++ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME; + +- /* if AIC is being turned on now, start with an EQD of 0 */ +- if (rx_eq->enable_aic == 0 && +- 
coalesce->use_adaptive_rx_coalesce == 1) { +- rx_eq->cur_eqd = 0; ++ for_all_rx_queues(adapter, rxo, i) { ++ rx_eq = &rxo->rx_eq; ++ ++ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce) ++ rx_eq->cur_eqd = 0; ++ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce; ++ ++ rx_max = coalesce->rx_coalesce_usecs_high; ++ rx_min = coalesce->rx_coalesce_usecs_low; ++ rx_cur = coalesce->rx_coalesce_usecs; ++ ++ if (rx_eq->enable_aic) { ++ if (rx_max > BE_MAX_EQD) ++ rx_max = BE_MAX_EQD; ++ if (rx_min > rx_max) ++ rx_min = rx_max; ++ rx_eq->max_eqd = rx_max; ++ rx_eq->min_eqd = rx_min; ++ if (rx_eq->cur_eqd > rx_max) ++ rx_eq->cur_eqd = rx_max; ++ if (rx_eq->cur_eqd < rx_min) ++ rx_eq->cur_eqd = rx_min; ++ } else { ++ if (rx_cur > BE_MAX_EQD) ++ rx_cur = BE_MAX_EQD; ++ if (rx_eq->cur_eqd != rx_cur) { ++ status = be_cmd_modify_eqd(adapter, rx_eq->q.id, ++ rx_cur); ++ if (!status) ++ rx_eq->cur_eqd = rx_cur; ++ } ++ } + } +- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce; +- +- rx_max = coalesce->rx_coalesce_usecs_high; +- rx_min = coalesce->rx_coalesce_usecs_low; +- rx_cur = coalesce->rx_coalesce_usecs; + + tx_max = coalesce->tx_coalesce_usecs_high; + tx_min = coalesce->tx_coalesce_usecs_low; +@@ -181,27 +292,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) + tx_eq->cur_eqd = tx_cur; + } + +- if (rx_eq->enable_aic) { +- if (rx_max > BE_MAX_EQD) +- rx_max = BE_MAX_EQD; +- if (rx_min > rx_max) +- rx_min = rx_max; +- rx_eq->max_eqd = rx_max; +- rx_eq->min_eqd = rx_min; +- if (rx_eq->cur_eqd > rx_max) +- rx_eq->cur_eqd = rx_max; +- if (rx_eq->cur_eqd < rx_min) +- rx_eq->cur_eqd = rx_min; +- } else { +- if (rx_cur > BE_MAX_EQD) +- rx_cur = BE_MAX_EQD; +- if (rx_eq->cur_eqd != rx_cur) { +- status = be_cmd_modify_eqd(adapter, rx_eq->q.id, +- rx_cur); +- if (!status) +- rx_eq->cur_eqd = rx_cur; +- } +- } + return 0; + } + +@@ -229,81 +319,294 @@ be_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) + { + struct be_adapter *adapter = netdev_priv(netdev); +- struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats; +- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va); +- struct be_rxf_stats *rxf_stats = &hw_stats->rxf; +- struct be_port_rxf_stats *port_stats = +- &rxf_stats->port[adapter->port_num]; +- struct net_device_stats *net_stats = &adapter->stats.net_stats; +- struct be_erx_stats *erx_stats = &hw_stats->erx; ++ struct be_rx_obj *rxo; ++ struct be_tx_obj *txo; + void *p = NULL; +- int i; ++ int i, j, base; + + for (i = 0; i < ETHTOOL_STATS_NUM; i++) { + switch (et_stats[i].type) { + case NETSTAT: +- p = net_stats; ++ p = &adapter->net_stats; + break; + case DRVSTAT: +- p = drvr_stats; +- break; +- case PORTSTAT: +- p = port_stats; +- break; +- case MISCSTAT: +- p = rxf_stats; +- break; +- case ERXSTAT: /* Currently only one ERX stat is provided */ +- p = (u32 *)erx_stats + adapter->rx_obj.q.id; ++ p = &adapter->drv_stats; + break; + } + + p = (u8 *)p + et_stats[i].offset; + data[i] = (et_stats[i].size == sizeof(u64)) ? +- *(u64 *)p: *(u32 *)p; ++ *(u64 *)p:(*(u32 *)p); + } + +- return; ++ base = ETHTOOL_STATS_NUM; ++ for_all_rx_queues(adapter, rxo, j) { ++ for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) { ++ p = (u8 *)&rxo->stats + et_rx_stats[i].offset; ++ data[base + j * ETHTOOL_RXSTATS_NUM + i] = ++ (et_rx_stats[i].size == sizeof(u64)) ? 
++ *(u64 *)p: *(u32 *)p; ++ } ++ } ++ ++ base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM; ++ for_all_tx_queues(adapter, txo, j) { ++ for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) { ++ p = (u8 *)&txo->stats + et_tx_stats[i].offset; ++ data[base + j * ETHTOOL_TXSTATS_NUM + i] = ++ (et_tx_stats[i].size == sizeof(u64)) ? ++ *(u64 *)p: *(u32 *)p; ++ } ++ } + } + + static void + be_get_stat_strings(struct net_device *netdev, uint32_t stringset, + uint8_t *data) + { +- int i; ++ struct be_adapter *adapter = netdev_priv(netdev); ++ int i, j; ++ + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ETHTOOL_STATS_NUM; i++) { + memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } ++ for (i = 0; i < adapter->num_rx_qs; i++) { ++ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) { ++ sprintf(data, "rxq%d: %s", i, ++ et_rx_stats[j].desc); ++ data += ETH_GSTRING_LEN; ++ } ++ } ++ for (i = 0; i < adapter->num_tx_qs; i++) { ++ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) { ++ sprintf(data, "txq%d: %s", i, ++ et_tx_stats[j].desc); ++ data += ETH_GSTRING_LEN; ++ } ++ } ++ break; ++ case ETH_SS_TEST: ++ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) { ++ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN); ++ data += ETH_GSTRING_LEN; ++ } + break; + } + } + ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0) + static int be_get_stats_count(struct net_device *netdev) + { +- return ETHTOOL_STATS_NUM; ++ struct be_adapter *adapter = netdev_priv(netdev); ++ ++ return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM ++ + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM; + } ++static int ++be_self_test_count(struct net_device *dev) ++{ ++ return ETHTOOL_TESTS_NUM; ++} ++#else ++ ++static int be_get_sset_count(struct net_device *netdev, int stringset) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ ++ switch (stringset) { ++ case ETH_SS_TEST: ++ return ETHTOOL_TESTS_NUM; ++ case ETH_SS_STATS: ++ return ETHTOOL_STATS_NUM + ++ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM + ++ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM; ++ default: ++ return -EINVAL; ++ } ++} ++#endif + + static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) + { +- ecmd->speed = SPEED_10000; ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct be_phy_info phy_info; ++ u8 mac_speed = 0; ++ u16 link_speed = 0; ++ int link_status = LINK_DOWN; ++ int status; ++ ++ if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { ++ status = be_cmd_link_status_query(adapter, &link_status, ++ &mac_speed, &link_speed, 0); ++ ++ be_link_status_update(adapter, link_status); ++ /* link_speed is in units of 10 Mbps */ ++ if (link_speed) { ++ ecmd->speed = link_speed*10; ++ } else { ++ switch (mac_speed) { ++ case PHY_LINK_SPEED_10MBPS: ++ ecmd->speed = SPEED_10; ++ break; ++ case PHY_LINK_SPEED_100MBPS: ++ ecmd->speed = SPEED_100; ++ break; ++ case PHY_LINK_SPEED_1GBPS: ++ ecmd->speed = SPEED_1000; ++ break; ++ case PHY_LINK_SPEED_10GBPS: ++ ecmd->speed = SPEED_10000; ++ break; ++ case PHY_LINK_SPEED_ZERO: ++ ecmd->speed = 0; ++ break; ++ } ++ } ++ ++ status = be_cmd_get_phy_info(adapter, &phy_info); ++ if (!status) { ++ switch (phy_info.interface_type) { ++ case PHY_TYPE_XFP_10GB: ++ case PHY_TYPE_SFP_1GB: ++ case PHY_TYPE_SFP_PLUS_10GB: ++ ecmd->port = PORT_FIBRE; ++ break; ++ default: ++ ecmd->port = PORT_TP; ++ break; ++ } ++ ++ switch (phy_info.interface_type) { ++ case PHY_TYPE_KR_10GB: ++ case PHY_TYPE_KX4_10GB: ++ ecmd->transceiver = XCVR_INTERNAL; ++ break; ++ default: ++ 
ecmd->transceiver = XCVR_EXTERNAL; ++ break; ++ } ++ ++ if (phy_info.auto_speeds_supported) { ++ ecmd->supported |= SUPPORTED_Autoneg; ++ ecmd->autoneg = AUTONEG_ENABLE; ++ ecmd->advertising |= ADVERTISED_Autoneg; ++ } ++ ++ if (phy_info.misc_params & BE_PAUSE_SYM_EN) { ++ ecmd->supported |= SUPPORTED_Pause; ++ ecmd->advertising |= ADVERTISED_Pause; ++ } ++ ++ } ++ ++ /* Save for future use */ ++ adapter->link_speed = ecmd->speed; ++ adapter->port_type = ecmd->port; ++ adapter->transceiver = ecmd->transceiver; ++ adapter->autoneg = ecmd->autoneg; ++ } else { ++ ecmd->speed = adapter->link_speed; ++ ecmd->port = adapter->port_type; ++ ecmd->transceiver = adapter->transceiver; ++ ecmd->autoneg = adapter->autoneg; ++ } ++ + ecmd->duplex = DUPLEX_FULL; +- ecmd->autoneg = AUTONEG_DISABLE; ++ ecmd->phy_address = (adapter->hba_port_num << 4) | ++ (adapter->port_name[adapter->hba_port_num]); ++ switch (ecmd->port) { ++ case PORT_FIBRE: ++ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); ++ break; ++ case PORT_TP: ++ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP); ++ break; ++ } ++ ++ if (ecmd->autoneg) { ++ ecmd->supported |= SUPPORTED_1000baseT_Full; ++ ecmd->advertising |= (ADVERTISED_10000baseT_Full | ++ ADVERTISED_1000baseT_Full); ++ } ++ + return 0; + } + ++static int be_set_settings(struct net_device *netdev, ++ struct ethtool_cmd *ecmd) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct be_phy_info phy_info; ++ u16 mac_speed=0; ++ u16 dac_cable_len=0; ++ u16 port_speed = 0; ++ int status; ++ ++ status = be_cmd_get_phy_info(adapter, &phy_info); ++ if (status) { ++ dev_warn(&adapter->pdev->dev, "port speed set failed.\n"); ++ return status; ++ } ++ ++ if (ecmd->autoneg == AUTONEG_ENABLE) { ++ switch(phy_info.interface_type) { ++ case PHY_TYPE_SFP_1GB: ++ case PHY_TYPE_BASET_1GB: ++ case PHY_TYPE_BASEX_1GB: ++ case PHY_TYPE_SGMII: ++ mac_speed = SPEED_AUTONEG_1GB_100MB_10MB; ++ break; ++ case PHY_TYPE_SFP_PLUS_10GB: ++ dev_warn(&adapter->pdev->dev, ++ "Autoneg not supported on this module. 
\n"); ++ return -EINVAL; ++ case PHY_TYPE_KR_10GB: ++ case PHY_TYPE_KX4_10GB: ++ mac_speed = SPEED_AUTONEG_10GB_1GB; ++ break; ++ case PHY_TYPE_BASET_10GB: ++ mac_speed = SPEED_AUTONEG_10GB_1GB_100MB; ++ break; ++ } ++ } else if(ecmd->autoneg == AUTONEG_DISABLE) { ++ if(ecmd->speed == SPEED_10) { ++ mac_speed = SPEED_FORCED_10MB; ++ } else if(ecmd->speed == SPEED_100) { ++ mac_speed = SPEED_FORCED_100MB; ++ } else if(ecmd->speed == SPEED_1000) { ++ mac_speed = SPEED_FORCED_1GB; ++ } else if(ecmd->speed == SPEED_10000) { ++ mac_speed = SPEED_FORCED_10GB; ++ } ++ } ++ ++ status = be_cmd_get_port_speed(adapter, adapter->hba_port_num, ++ &dac_cable_len, &port_speed); ++ ++ if (!status && port_speed != mac_speed) ++ status = be_cmd_set_port_speed_v1(adapter, ++ adapter->hba_port_num, mac_speed, ++ dac_cable_len); ++ if (status) ++ dev_warn(&adapter->pdev->dev, "port speed set failed.\n"); ++ ++ return status; ++ ++} ++ + static void + be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) + { + struct be_adapter *adapter = netdev_priv(netdev); + +- ring->rx_max_pending = adapter->rx_obj.q.len; +- ring->tx_max_pending = adapter->tx_obj.q.len; ++ ring->rx_max_pending = adapter->rx_obj[0].q.len; ++ ring->tx_max_pending = adapter->tx_obj[0].q.len; + +- ring->rx_pending = atomic_read(&adapter->rx_obj.q.used); +- ring->tx_pending = atomic_read(&adapter->tx_obj.q.used); ++ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used); ++ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used); + } + + static void +@@ -312,7 +615,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) + struct be_adapter *adapter = netdev_priv(netdev); + + be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); +- ecmd->autoneg = 0; ++ ecmd->autoneg = adapter->autoneg; + } + + static int +@@ -334,6 +637,203 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) + return status; + } + ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0) ++static int ++be_phys_id(struct net_device *netdev, u32 data) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ int status; ++ u32 cur; ++ ++ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur); ++ ++ if (cur == BEACON_STATE_ENABLED) ++ return 0; ++ ++ if (data < 2) ++ data = 2; ++ ++ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, ++ BEACON_STATE_ENABLED); ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(data*HZ); ++ ++ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, ++ BEACON_STATE_DISABLED); ++ ++ return status; ++} ++#else ++static int ++be_set_phys_id(struct net_device *netdev, ++ enum ethtool_phys_id_state state) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ ++ switch (state) { ++ case ETHTOOL_ID_ACTIVE: ++ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, ++ &adapter->beacon_state); ++ return 1; /* cycle on/off once per second */ ++ ++ case ETHTOOL_ID_ON: ++ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, ++ BEACON_STATE_ENABLED); ++ break; ++ ++ case ETHTOOL_ID_OFF: ++ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, ++ BEACON_STATE_DISABLED); ++ break; ++ ++ case ETHTOOL_ID_INACTIVE: ++ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, ++ adapter->beacon_state); ++ } ++ ++ return 0; ++} ++#endif ++ ++static bool ++be_is_wol_supported(struct be_adapter *adapter) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ ++ if (!be_physfn(adapter)) ++ return false; ++ ++ 
switch (pdev->subsystem_device) { ++ case OC_SUBSYS_DEVICE_ID1: ++ case OC_SUBSYS_DEVICE_ID2: ++ case OC_SUBSYS_DEVICE_ID3: ++ case OC_SUBSYS_DEVICE_ID4: ++ return false; ++ default: ++ return true; ++ } ++} ++ ++static void ++be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ ++ if (be_is_wol_supported(adapter)) ++ wol->supported = WAKE_MAGIC; ++ if (adapter->wol) ++ wol->wolopts = WAKE_MAGIC; ++ else ++ wol->wolopts = 0; ++ memset(&wol->sopass, 0, sizeof(wol->sopass)); ++} ++ ++static int ++be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ ++ if (wol->wolopts & ~WAKE_MAGIC) ++ return -EOPNOTSUPP; ++ ++ if (!be_is_wol_supported(adapter)) { ++ dev_warn(&adapter->pdev->dev, ++ "WOL not supported for this subsystemid: %x\n", ++ adapter->pdev->subsystem_device); ++ return -EOPNOTSUPP; ++ } ++ ++ if (wol->wolopts & WAKE_MAGIC) ++ adapter->wol = true; ++ else ++ adapter->wol = false; ++ ++ return 0; ++} ++ ++static int ++be_test_ddr_dma(struct be_adapter *adapter) ++{ ++ int ret, i; ++ struct be_dma_mem ddrdma_cmd; ++ u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL}; ++ ++ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ++ ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, ++ &ddrdma_cmd.dma); ++ if (!ddrdma_cmd.va) { ++ dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < 2; i++) { ++ ret = be_cmd_ddr_dma_test(adapter, pattern[i], ++ 4096, &ddrdma_cmd); ++ if (ret != 0) ++ goto err; ++ } ++ ++err: ++ pci_free_consistent(adapter->pdev, ddrdma_cmd.size, ++ ddrdma_cmd.va, ddrdma_cmd.dma); ++ return ret; ++} ++ ++static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, ++ u64 *status) ++{ ++ be_cmd_set_loopback(adapter, adapter->hba_port_num, ++ loopback_type, 1); ++ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, ++ loopback_type, 1500, ++ 2, 0xabc); ++ be_cmd_set_loopback(adapter, adapter->hba_port_num, ++ BE_NO_LOOPBACK, 1); ++ return *status; ++} ++ ++static void ++be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ int link_status; ++ u8 mac_speed = 0; ++ u16 qos_link_speed = 0; ++ ++ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); ++ ++ if (test->flags & ETH_TEST_FL_OFFLINE) { ++ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, ++ &data[0]) != 0) { ++ test->flags |= ETH_TEST_FL_FAILED; ++ } ++ if (be_loopback_test(adapter, BE_PHY_LOOPBACK, ++ &data[1]) != 0) { ++ test->flags |= ETH_TEST_FL_FAILED; ++ } ++ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, ++ &data[2]) != 0) { ++ test->flags |= ETH_TEST_FL_FAILED; ++ } ++ } ++ ++ if (be_test_ddr_dma(adapter) != 0) { ++ data[3] = 1; ++ test->flags |= ETH_TEST_FL_FAILED; ++ } ++ ++ if (be_cmd_link_status_query(adapter, &link_status, &mac_speed, ++ &qos_link_speed, 0) != 0) { ++ test->flags |= ETH_TEST_FL_FAILED; ++ data[4] = -1; ++ } else if (!mac_speed) { ++ test->flags |= ETH_TEST_FL_FAILED; ++ data[4] = 1; ++ } ++ ++} ++ ++#ifdef HAVE_ETHTOOL_FLASH + static int + be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) + { +@@ -347,11 +847,73 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) + + return be_load_fw(adapter, file_name); + } ++#endif + +-const struct ethtool_ops be_ethtool_ops = { ++static int ++be_get_eeprom_len(struct net_device 
*netdev) ++{ ++ return BE_READ_SEEPROM_LEN; ++} ++ ++static int ++be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, ++ uint8_t *data) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct be_dma_mem eeprom_cmd; ++ struct be_cmd_resp_seeprom_read *resp; ++ int status; ++ ++ if (!eeprom->len) ++ return -EINVAL; ++ ++ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); ++ ++ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); ++ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); ++ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size, ++ &eeprom_cmd.dma); ++ ++ if (!eeprom_cmd.va) { ++ dev_err(&adapter->pdev->dev, ++ "Memory allocation failure. Could not read eeprom\n"); ++ return -ENOMEM; ++ } ++ ++ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); ++ ++ if (!status) { ++ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va; ++ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); ++ } ++ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va, ++ eeprom_cmd.dma); ++ ++ return status; ++} ++ ++static int be_set_tso(struct net_device *netdev, uint32_t data) ++{ ++ if (data) { ++ netdev->features |= NETIF_F_TSO; ++ netdev->features |= NETIF_F_TSO6; ++ } else { ++ netdev->features &= ~NETIF_F_TSO; ++ netdev->features &= ~NETIF_F_TSO6; ++ } ++ return 0; ++} ++ ++ ++struct ethtool_ops be_ethtool_ops = { + .get_settings = be_get_settings, ++ .set_settings = be_set_settings, + .get_drvinfo = be_get_drvinfo, ++ .get_wol = be_get_wol, ++ .set_wol = be_set_wol, + .get_link = ethtool_op_get_link, ++ .get_eeprom_len = be_get_eeprom_len, ++ .get_eeprom = be_read_eeprom, + .get_coalesce = be_get_coalesce, + .set_coalesce = be_set_coalesce, + .get_ringparam = be_get_ringparam, +@@ -364,9 +926,21 @@ const struct ethtool_ops be_ethtool_ops = { + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, +- .set_tso = ethtool_op_set_tso, ++ .set_tso = be_set_tso, + .get_strings = be_get_stat_strings, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0) ++ .phys_id = be_phys_id, + .get_stats_count = be_get_stats_count, ++ .self_test_count = be_self_test_count, ++#else ++ .set_phys_id = be_set_phys_id, ++ .get_sset_count = be_get_sset_count, ++#endif + .get_ethtool_stats = be_get_ethtool_stats, ++ .get_regs_len = be_get_reg_len, ++ .get_regs = be_get_regs, ++#ifdef HAVE_ETHTOOL_FLASH + .flash_device = be_do_flash, ++#endif ++ .self_test = be_self_test + }; +diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h +index a3394b4..f871d8c 100644 +--- a/drivers/net/benet/be_hw.h ++++ b/drivers/net/benet/be_hw.h +@@ -1,18 +1,18 @@ + /* +- * Copyright (C) 2005 - 2009 ServerEngines ++ * Copyright (C) 2005 - 2011 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation. The full GNU General ++ * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: +- * linux-drivers@serverengines.com ++ * linux-drivers@emulex.com + * +- * ServerEngines +- * 209 N. Fair Oaks Ave +- * Sunnyvale, CA 94085 ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 + */ + + /********* Mailbox door bell *************/ +@@ -26,24 +26,34 @@ + * queue entry. 
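+ * + * A hedged illustration of how a command is posted through this + * doorbell (the authoritative sequence is be_mbox_notify() in + * be_cmds.c; "db" below stands for the ioremapped doorbell BAR and + * "mbox" for the mailbox DMA memory, both shorthand here): phase 1 + * writes the upper address bits with the hi bit set, phase 2 the + * lower bits with it clear, polling the ready bit after each write: + * + *	val = MPU_MAILBOX_DB_HI_MASK; + *	val |= (upper_32_bits(mbox->dma) >> 2) << 2; + *	iowrite32(val, db + MPU_MAILBOX_DB_OFFSET); + *	... poll MPU_MAILBOX_DB_RDY_MASK ... + *	val = (u32)(mbox->dma >> 4) << 2; + *	iowrite32(val, db + MPU_MAILBOX_DB_OFFSET); + *	... poll MPU_MAILBOX_DB_RDY_MASK, then read the completion ...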
+ */ + #define MPU_MAILBOX_DB_OFFSET 0x160 +-#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */ ++#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */ + #define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */ + +-#define MPU_EP_CONTROL 0 ++#define MPU_EP_CONTROL 0 + + /********** MPU semphore ******************/ +-#define MPU_EP_SEMAPHORE_OFFSET 0xac ++#define MPU_EP_SEMAPHORE_OFFSET 0xac ++#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400 + #define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF + #define EP_SEMAPHORE_POST_ERR_MASK 0x1 + #define EP_SEMAPHORE_POST_ERR_SHIFT 31 + /* MPU semphore POST stage values */ +-#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */ +-#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */ ++#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */ ++#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */ + #define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ + #define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ + ++/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */ ++#define SLIPORT_STATUS_OFFSET 0x404 ++#define SLIPORT_CONTROL_OFFSET 0x408 ++ ++#define SLIPORT_STATUS_ERR_MASK 0x80000000 ++#define SLIPORT_STATUS_RN_MASK 0x01000000 ++#define SLIPORT_STATUS_RDY_MASK 0x00800000 ++#define SLI_PORT_CONTROL_IP_MASK 0x08000000 ++ + /********* Memory BAR register ************/ +-#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc ++#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc + /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt + * Disable" may still globally block interrupts in addition to individual + * interrupt masks; a mechanism for the device driver to block all interrupts +@@ -52,13 +62,70 @@ + */ + #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ + ++/********* Link Status CSR ****************/ ++#define PCICFG_PCIE_LINK_STATUS_OFFSET 0xd0 ++#define PCIE_LINK_STATUS_SPEED_MASK 0xFF /* bits 16 - 19 */ ++#define PCIE_LINK_STATUS_SPEED_SHIFT 16 ++#define PCIE_LINK_STATUS_NEG_WIDTH_MASK 0x3F /* bits 20 - 25 */ ++#define PCIE_LINK_STATUS_NEG_WIDTH_SHIFT 20 ++ ++/********* Link Capability CSR ************/ ++#define PCICFG_PCIE_LINK_CAP_OFFSET 0xcc ++#define PCIE_LINK_CAP_MAX_SPEED_MASK 0xFF /* bits 0 - 3 */ ++#define PCIE_LINK_CAP_MAX_SPEED_SHIFT 0 ++#define PCIE_LINK_CAP_MAX_WIDTH_MASK 0x3F /* bits 4 - 9 */ ++#define PCIE_LINK_CAP_MAX_WIDTH_SHIFT 4 ++ ++/********* PCI Function Capability ************/ ++#define BE_FUNCTION_CAPS_UNCLASSIFIED_STATS 0x1 ++#define BE_FUNCTION_CAPS_RSS 0x2 ++#define BE_FUNCTION_CAPS_PROMISCUOUS 0x4 ++#define BE_FUNCTION_CAPS_LEGACY_MODE 0x8 ++ ++/********* Power managment (WOL) **********/ ++#define PCICFG_PM_CONTROL_OFFSET 0x44 ++#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */ ++ ++/********* Online Control Registers *******/ ++#define PCICFG_ONLINE0 0xB0 ++#define PCICFG_ONLINE1 0xB4 ++ ++/********* UE Status and Mask Registers ***/ ++#define PCICFG_UE_STATUS_LOW 0xA0 ++#define PCICFG_UE_STATUS_HIGH 0xA4 ++#define PCICFG_UE_STATUS_LOW_MASK 0xA8 ++#define PCICFG_UE_STATUS_HI_MASK 0xAC ++ ++/******** SLI_INTF ***********************/ ++#define SLI_INTF_REG_OFFSET 0x58 ++#define SLI_INTF_VALID_MASK 0xE0000000 ++#define SLI_INTF_VALID 0xC0000000 ++#define SLI_INTF_HINT2_MASK 0x1F000000 ++#define SLI_INTF_HINT2_SHIFT 24 ++#define SLI_INTF_HINT1_MASK 0x00FF0000 ++#define SLI_INTF_HINT1_SHIFT 16 ++#define SLI_INTF_FAMILY_MASK 0x00000F00 ++#define SLI_INTF_FAMILY_SHIFT 8 ++#define SLI_INTF_IF_TYPE_MASK 0x0000F000 ++#define 
SLI_INTF_IF_TYPE_SHIFT 12 ++#define SLI_INTF_REV_MASK 0x000000F0 ++#define SLI_INTF_REV_SHIFT 4 ++#define SLI_INTF_FT_MASK 0x00000001 ++ ++/* SLI family */ ++#define BE_SLI_FAMILY 0x0 ++#define LANCER_A0_SLI_FAMILY 0xA ++ + /********* ISR0 Register offset **********/ +-#define CEV_ISR0_OFFSET 0xC18 ++#define CEV_ISR0_OFFSET 0xC18 + #define CEV_ISR_SIZE 4 + + /********* Event Q door bell *************/ + #define DB_EQ_OFFSET DB_CQ_OFFSET + #define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ ++#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */ ++#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */ ++ + /* Clear the interrupt for this eq */ + #define DB_EQ_CLR_SHIFT (9) /* bit 9 */ + /* Must be 1 */ +@@ -69,12 +136,16 @@ + #define DB_EQ_REARM_SHIFT (29) /* bit 29 */ + + /********* Compl Q door bell *************/ +-#define DB_CQ_OFFSET 0x120 ++#define DB_CQ_OFFSET 0x120 + #define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ ++#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */ ++#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14 ++ placing at 11-15 */ ++ + /* Number of event entries processed */ +-#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ ++#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ + /* Rearm bit */ +-#define DB_CQ_REARM_SHIFT (29) /* bit 29 */ ++#define DB_CQ_REARM_SHIFT (29) /* bit 29 */ + + /********** TX ULP door bell *************/ + #define DB_TXULP1_OFFSET 0x60 +@@ -84,25 +155,103 @@ + #define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */ + + /********** RQ(erx) door bell ************/ +-#define DB_RQ_OFFSET 0x100 ++#define DB_RQ_OFFSET 0x100 + #define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ + /* Number of rx frags posted */ + #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ + + /********** MCC door bell ************/ +-#define DB_MCCQ_OFFSET 0x140 ++#define DB_MCCQ_OFFSET 0x140 + #define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */ + /* Number of entries posted */ + #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ + ++/********** SRIOV VF PCICFG OFFSET ********/ ++#define SRIOV_VF_PCICFG_OFFSET (4096) ++ ++/********** FAT TABLE ********/ ++#define RETRIEVE_FAT 0 ++#define QUERY_FAT 1 ++ ++/* Flashrom related descriptors */ ++#define IMAGE_TYPE_FIRMWARE 160 ++#define IMAGE_TYPE_BOOTCODE 224 ++#define IMAGE_TYPE_OPTIONROM 32 ++ ++#define NUM_FLASHDIR_ENTRIES 32 ++ ++#define IMG_TYPE_ISCSI_ACTIVE 0 ++#define IMG_TYPE_REDBOOT 1 ++#define IMG_TYPE_BIOS 2 ++#define IMG_TYPE_PXE_BIOS 3 ++#define IMG_TYPE_FCOE_BIOS 8 ++#define IMG_TYPE_ISCSI_BACKUP 9 ++#define IMG_TYPE_FCOE_FW_ACTIVE 10 ++#define IMG_TYPE_FCOE_FW_BACKUP 11 ++#define IMG_TYPE_NCSI_FW 13 ++#define IMG_TYPE_PHY_FW 99 ++#define TN_8022 13 ++ ++#define ILLEGAL_IOCTL_REQ 2 ++#define FLASHROM_OPER_PHY_FLASH 9 ++#define FLASHROM_OPER_PHY_SAVE 10 ++#define FLASHROM_OPER_FLASH 1 ++#define FLASHROM_OPER_SAVE 2 ++#define FLASHROM_OPER_REPORT 4 ++ ++#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */ ++#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */ ++#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */ ++#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */ ++#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */ ++#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */ ++#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144) ++#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 (262144) ++ ++#define FLASH_NCSI_MAGIC (0x16032009) ++#define 
FLASH_NCSI_DISABLED (0) ++#define FLASH_NCSI_ENABLED (1) ++ ++#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000) ++ ++/* Offsets for components on Flash. */ ++#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576) ++#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296) ++#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016) ++#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736) ++#define FLASH_iSCSI_BIOS_START_g2 (7340032) ++#define FLASH_PXE_BIOS_START_g2 (7864320) ++#define FLASH_FCoE_BIOS_START_g2 (524288) ++#define FLASH_REDBOOT_START_g2 (0) ++ ++#define FLASH_NCSI_START_g3 (15990784) ++#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152) ++#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304) ++#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456) ++#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608) ++#define FLASH_iSCSI_BIOS_START_g3 (12582912) ++#define FLASH_PXE_BIOS_START_g3 (13107200) ++#define FLASH_FCoE_BIOS_START_g3 (13631488) ++#define FLASH_REDBOOT_START_g3 (262144) ++#define FLASH_PHY_FW_START_g3 (1310720) ++ ++/************* Rx Packet Type Encoding **************/ ++#define BE_UNICAST_PACKET 0 ++#define BE_MULTICAST_PACKET 1 ++#define BE_BROADCAST_PACKET 2 ++#define BE_RSVD_PACKET 3 ++ + /* + * BE descriptors: host memory data structures whose formats + * are hardwired in BE silicon. + */ + /* Event Queue Descriptor */ +-#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */ +-#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */ +-#define EQ_ENTRY_RES_ID_SHIFT 16 ++#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */ ++#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */ ++#define EQ_ENTRY_RES_ID_SHIFT 16 ++ ++#define BE_MAC_PROMISCUOUS 62 /* Promiscuous mode */ ++ + struct be_eq_entry { + u32 evt; + }; +@@ -126,7 +275,7 @@ struct amap_eth_hdr_wrb { + u8 event; + u8 crc; + u8 forward; +- u8 ipsec; ++ u8 lso6; + u8 mgmt; + u8 ipcs; + u8 udpcs; +@@ -151,7 +300,7 @@ struct be_eth_hdr_wrb { + * offset/shift/mask of each field */ + struct amap_eth_tx_compl { + u8 wrb_index[16]; /* dword 0 */ +- u8 ct[2]; /* dword 0 */ ++ u8 ct[2]; /* dword 0 */ + u8 port[2]; /* dword 0 */ + u8 rsvd0[8]; /* dword 0 */ + u8 status[4]; /* dword 0 */ +@@ -179,10 +328,10 @@ struct be_eth_rx_d { + + /* RX Compl Queue Descriptor */ + +-/* Pseudo amap definition for eth_rx_compl in which each bit of the +- * actual structure is defined as a byte: used to calculate ++/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which ++ * each bit of the actual structure is defined as a byte: used to calculate + * offset/shift/mask of each field */ +-struct amap_eth_rx_compl { ++struct amap_eth_rx_compl_v0 { + u8 vlan_tag[16]; /* dword 0 */ + u8 pktsize[14]; /* dword 0 */ + u8 port; /* dword 0 */ +@@ -213,39 +362,91 @@ struct amap_eth_rx_compl { + u8 rsshash[32]; /* dword 3 */ + } __packed; + ++/* Pseudo amap definition for BE3 native mode eth_rx_compl in which ++ * each bit of the actual structure is defined as a byte: used to calculate ++ * offset/shift/mask of each field */ ++struct amap_eth_rx_compl_v1 { ++ u8 vlan_tag[16]; /* dword 0 */ ++ u8 pktsize[14]; /* dword 0 */ ++ u8 vtp; /* dword 0 */ ++ u8 ip_opt; /* dword 0 */ ++ u8 err; /* dword 1 */ ++ u8 rsshp; /* dword 1 */ ++ u8 ipf; /* dword 1 */ ++ u8 tcpf; /* dword 1 */ ++ u8 udpf; /* dword 1 */ ++ u8 ipcksm; /* dword 1 */ ++ u8 l4_cksm; /* dword 1 */ ++ u8 ip_version; /* dword 1 */ ++ u8 macdst[7]; /* dword 1 */ ++ u8 rsvd0; /* dword 1 */ ++ u8 fragndx[10]; /* dword 1 */ ++ u8 ct[2]; /* dword 1 */ ++ u8 sw; /* dword 1 */ ++ u8 numfrags[3]; /* dword 1 */ ++ u8 
rss_flush; /* dword 2 */ ++ u8 cast_enc[2]; /* dword 2 */ ++ u8 vtm; /* dword 2 */ ++ u8 rss_bank; /* dword 2 */ ++ u8 port[2]; /* dword 2 */ ++ u8 vntagp; /* dword 2 */ ++ u8 header_len[8]; /* dword 2 */ ++ u8 header_split[2]; /* dword 2 */ ++ u8 rsvd1[13]; /* dword 2 */ ++ u8 valid; /* dword 2 */ ++ u8 rsshash[32]; /* dword 3 */ ++} __packed; ++ + struct be_eth_rx_compl { + u32 dw[4]; + }; + +-/* Flashrom related descriptors */ +-#define IMAGE_TYPE_FIRMWARE 160 +-#define IMAGE_TYPE_BOOTCODE 224 +-#define IMAGE_TYPE_OPTIONROM 32 ++struct mgmt_hba_attribs { ++ u8 flashrom_version_string[32]; ++ u8 manufacturer_name[32]; ++ u32 supported_modes; ++ u32 rsvd0[3]; ++ u8 ncsi_ver_string[12]; ++ u32 default_extended_timeout; ++ u8 controller_model_number[32]; ++ u8 controller_description[64]; ++ u8 controller_serial_number[32]; ++ u8 ip_version_string[32]; ++ u8 firmware_version_string[32]; ++ u8 bios_version_string[32]; ++ u8 redboot_version_string[32]; ++ u8 driver_version_string[32]; ++ u8 fw_on_flash_version_string[32]; ++ u32 functionalities_supported; ++ u16 max_cdblength; ++ u8 asic_revision; ++ u8 generational_guid[16]; ++ u8 hba_port_count; ++ u16 default_link_down_timeout; ++ u8 iscsi_ver_min_max; ++ u8 multifunction_device; ++ u8 cache_valid; ++ u8 hba_status; ++ u8 max_domains_supported; ++ u8 phy_port; ++ u32 firmware_post_status; ++ u32 hba_mtu[8]; ++ u32 rsvd1[4]; ++}; + +-#define NUM_FLASHDIR_ENTRIES 32 +- +-#define FLASHROM_TYPE_ISCSI_ACTIVE 0 +-#define FLASHROM_TYPE_BIOS 2 +-#define FLASHROM_TYPE_PXE_BIOS 3 +-#define FLASHROM_TYPE_FCOE_BIOS 8 +-#define FLASHROM_TYPE_ISCSI_BACKUP 9 +-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10 +-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11 +- +-#define FLASHROM_OPER_FLASH 1 +-#define FLASHROM_OPER_SAVE 2 +- +-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */ +-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */ +- +-/* Offsets for components on Flash. 
*/ +-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576) +-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296) +-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016) +-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736) +-#define FLASH_iSCSI_BIOS_START (7340032) +-#define FLASH_PXE_BIOS_START (7864320) +-#define FLASH_FCoE_BIOS_START (524288) ++struct mgmt_controller_attrib { ++ struct mgmt_hba_attribs hba_attribs; ++ u16 pci_vendor_id; ++ u16 pci_device_id; ++ u16 pci_sub_vendor_id; ++ u16 pci_sub_system_id; ++ u8 pci_bus_number; ++ u8 pci_device_number; ++ u8 pci_function_number; ++ u8 interface_type; ++ u64 unique_identifier; ++ u32 rsvd0[5]; ++}; + + struct controller_id { + u32 vendor; +@@ -254,7 +455,20 @@ struct controller_id { + u32 subdevice; + }; + +-struct flash_file_hdr { ++struct flash_comp { ++ unsigned long offset; ++ int optype; ++ int size; ++}; ++ ++struct image_hdr { ++ u32 imageid; ++ u32 imageoffset; ++ u32 imagelength; ++ u32 image_checksum; ++ u8 image_version[32]; ++}; ++struct flash_file_hdr_g2 { + u8 sign[32]; + u32 cksum; + u32 antidote; +@@ -266,6 +480,17 @@ struct flash_file_hdr { + u8 build[24]; + }; + ++struct flash_file_hdr_g3 { ++ u8 sign[52]; ++ u8 ufi_version[4]; ++ u32 file_len; ++ u32 cksum; ++ u32 antidote; ++ u32 num_imgs; ++ u8 build[24]; ++ u8 rsvd[32]; ++}; ++ + struct flash_section_hdr { + u32 format_rev; + u32 cksum; +@@ -299,3 +524,19 @@ struct flash_section_info { + struct flash_section_hdr fsec_hdr; + struct flash_section_entry fsec_entry[32]; + }; ++ ++struct flash_ncsi_image_hdr { ++ u32 magic; ++ u8 hdr_len; ++ u8 type; ++ u16 hdr_ver; ++ u8 rsvd0[2]; ++ u16 load_offset; ++ u32 len; ++ u32 flash_offset; ++ u8 ver[16]; ++ u8 name[24]; ++ u32 img_cksum; ++ u8 rsvd1[4]; ++ u32 hdr_cksum; ++}; +diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c +index 000e377..f501aa3 100644 +--- a/drivers/net/benet/be_main.c ++++ b/drivers/net/benet/be_main.c +@@ -1,18 +1,18 @@ + /* +- * Copyright (C) 2005 - 2009 ServerEngines ++ * Copyright (C) 2005 - 2011 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 +- * as published by the Free Software Foundation. The full GNU General ++ * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: +- * linux-drivers@serverengines.com ++ * linux-drivers@emulex.com + * +- * ServerEngines +- * 209 N. 
Fair Oaks Ave +- * Sunnyvale, CA 94085 ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 + */ + + #include "be.h" +@@ -22,23 +22,119 @@ + MODULE_VERSION(DRV_VER); + MODULE_DEVICE_TABLE(pci, be_dev_ids); + MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); +-MODULE_AUTHOR("ServerEngines Corporation"); ++MODULE_AUTHOR("Emulex Corporation"); + MODULE_LICENSE("GPL"); ++MODULE_INFO(supported, "external"); + +-static unsigned int rx_frag_size = 2048; +-module_param(rx_frag_size, uint, S_IRUGO); +-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); ++static ushort rx_frag_size = 2048; ++static unsigned int num_vfs; ++static unsigned int msix = 1; ++module_param(rx_frag_size, ushort, S_IRUGO); ++module_param(num_vfs, uint, S_IRUGO); ++module_param(msix, uint, S_IRUGO); ++MODULE_PARM_DESC(rx_frag_size, "Size of receive fragment buffer" ++ " - 2048 (default), 4096 or 8192"); ++MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); ++MODULE_PARM_DESC(msix, "Enable and disable the MSI" ++ "x (By default MSIx is enabled)"); ++static unsigned int gro = 1; ++module_param(gro, uint, S_IRUGO); ++MODULE_PARM_DESC(gro, "Enable or Disable GRO. Enabled by default"); ++ ++static unsigned int multi_rxq = true; ++module_param(multi_rxq, uint, S_IRUGO); ++MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default"); + + static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, +- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, ++ /* ++ * Lancer is not part of Palau 4.0 ++ * { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, ++ */ + { 0 } + }; + MODULE_DEVICE_TABLE(pci, be_dev_ids); + ++/* UE Status Low CSR */ ++static char *ue_status_low_desc[] = { ++ "CEV", ++ "CTX", ++ "DBUF", ++ "ERX", ++ "Host", ++ "MPU", ++ "NDMA", ++ "PTC ", ++ "RDMA ", ++ "RXF ", ++ "RXIPS ", ++ "RXULP0 ", ++ "RXULP1 ", ++ "RXULP2 ", ++ "TIM ", ++ "TPOST ", ++ "TPRE ", ++ "TXIPS ", ++ "TXULP0 ", ++ "TXULP1 ", ++ "UC ", ++ "WDMA ", ++ "TXULP2 ", ++ "HOST1 ", ++ "P0_OB_LINK ", ++ "P1_OB_LINK ", ++ "HOST_GPIO ", ++ "MBOX ", ++ "AXGMAC0", ++ "AXGMAC1", ++ "JTAG", ++ "MPU_INTPEND" ++}; ++ ++/* UE Status High CSR */ ++static char *ue_status_hi_desc[] = { ++ "LPCMEMHOST", ++ "MGMT_MAC", ++ "PCS0ONLINE", ++ "MPU_IRAM", ++ "PCS1ONLINE", ++ "PCTL0", ++ "PCTL1", ++ "PMEM", ++ "RR", ++ "TXPB", ++ "RXPP", ++ "XAUI", ++ "TXP", ++ "ARM", ++ "IPC", ++ "HOST2", ++ "HOST3", ++ "HOST4", ++ "HOST5", ++ "HOST6", ++ "HOST7", ++ "HOST8", ++ "HOST9", ++ "NETC", ++ "Unknown", ++ "Unknown", ++ "Unknown", ++ "Unknown", ++ "Unknown", ++ "Unknown", ++ "Unknown", ++ "Unknown" ++}; ++ ++static inline bool be_multi_rxq(struct be_adapter *adapter) ++{ ++ return (adapter->num_rx_qs > 1); ++} ++ + static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) + { + struct be_dma_mem *mem = &q->dma_mem; +@@ -69,6 +165,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable) + u32 reg = ioread32(addr); + u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + ++ if (adapter->eeh_err) ++ return; ++ + if (!enabled && enable) + reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + else if (enabled && !enable) +@@ -84,6 +183,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) + u32 val = 0; + val |= qid & DB_RQ_RING_ID_MASK; + val |= posted << DB_RQ_NUM_POSTED_SHIFT; ++ ++ wmb(); + iowrite32(val, adapter->db + 
DB_RQ_OFFSET); + } + +@@ -92,6 +193,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted) + u32 val = 0; + val |= qid & DB_TXULP_RING_ID_MASK; + val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; ++ ++ wmb(); + iowrite32(val, adapter->db + DB_TXULP1_OFFSET); + } + +@@ -100,6 +203,12 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid, + { + u32 val = 0; + val |= qid & DB_EQ_RING_ID_MASK; ++ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << ++ DB_EQ_RING_ID_EXT_MASK_SHIFT); ++ ++ if (adapter->eeh_err) ++ return; ++ + if (arm) + val |= 1 << DB_EQ_REARM_SHIFT; + if (clear_int) +@@ -113,6 +222,12 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) + { + u32 val = 0; + val |= qid & DB_CQ_RING_ID_MASK; ++ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << ++ DB_CQ_RING_ID_EXT_MASK_SHIFT); ++ ++ if (adapter->eeh_err) ++ return; ++ + if (arm) + val |= 1 << DB_CQ_REARM_SHIFT; + val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; +@@ -124,96 +239,250 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) + struct be_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + int status = 0; ++ u8 current_mac[ETH_ALEN]; ++ u32 pmac_id = adapter->pmac_id; + +- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); ++ if (!is_valid_ether_addr(addr->sa_data)) ++ return -EADDRNOTAVAIL; ++ ++ status = be_cmd_mac_addr_query(adapter, current_mac, ++ MAC_ADDRESS_TYPE_NETWORK, false, ++ adapter->if_handle); + if (status) +- return status; ++ goto err; ++ ++ if (!memcmp(addr->sa_data, current_mac, ETH_ALEN)) ++ goto done; + + status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, +- adapter->if_handle, &adapter->pmac_id); +- if (!status) ++ adapter->if_handle, &adapter->pmac_id, 0); ++ ++ if (!status) { ++ status = be_cmd_pmac_del(adapter, adapter->if_handle, ++ pmac_id, 0); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ++ goto done; ++ } + +- return status; ++err: ++ if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) ++ return -EPERM; ++ else ++ dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", ++ addr->sa_data); ++done: ++ return status; ++} ++ ++static void populate_be2_stats(struct be_adapter *adapter) ++{ ++ ++ struct be_drv_stats *drvs = &adapter->drv_stats; ++ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter); ++ struct be_port_rxf_stats_v0 *port_stats = ++ be_port_rxf_stats_from_cmd(adapter); ++ struct be_rxf_stats_v0 *rxf_stats = ++ be_rxf_stats_from_cmd(adapter); ++ ++ drvs->rx_pause_frames = port_stats->rx_pause_frames; ++ drvs->rx_crc_errors = port_stats->rx_crc_errors; ++ drvs->rx_control_frames = port_stats->rx_control_frames; ++ drvs->rx_in_range_errors = port_stats->rx_in_range_errors; ++ drvs->rx_frame_too_long = port_stats->rx_frame_too_long; ++ drvs->rx_dropped_runt = port_stats->rx_dropped_runt; ++ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; ++ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; ++ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; ++ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow; ++ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; ++ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; ++ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; ++ drvs->rx_out_range_errors = port_stats->rx_out_range_errors; ++ drvs->rx_input_fifo_overflow_drop = ++ port_stats->rx_input_fifo_overflow; ++ drvs->rx_dropped_header_too_small = ++ 
port_stats->rx_dropped_header_too_small; ++ drvs->rx_address_match_errors = ++ port_stats->rx_address_match_errors; ++ drvs->rx_alignment_symbol_errors = ++ port_stats->rx_alignment_symbol_errors; ++ ++ drvs->tx_pauseframes = port_stats->tx_pauseframes; ++ drvs->tx_controlframes = port_stats->tx_controlframes; ++ ++ if (adapter->port_num) ++ drvs->jabber_events = ++ rxf_stats->port1_jabber_events; ++ else ++ drvs->jabber_events = ++ rxf_stats->port0_jabber_events; ++ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; ++ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb; ++ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; ++ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring; ++ drvs->forwarded_packets = rxf_stats->forwarded_packets; ++ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; ++ drvs->rx_drops_no_tpre_descr = ++ rxf_stats->rx_drops_no_tpre_descr; ++ drvs->rx_drops_too_many_frags = ++ rxf_stats->rx_drops_too_many_frags; ++ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; ++} ++ ++static void populate_be3_stats(struct be_adapter *adapter) ++{ ++ struct be_drv_stats *drvs = &adapter->drv_stats; ++ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter); ++ ++ struct be_rxf_stats_v1 *rxf_stats = ++ be_rxf_stats_from_cmd(adapter); ++ struct be_port_rxf_stats_v1 *port_stats = ++ be_port_rxf_stats_from_cmd(adapter); ++ ++ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; ++ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; ++ drvs->rx_pause_frames = port_stats->rx_pause_frames; ++ drvs->rx_crc_errors = port_stats->rx_crc_errors; ++ drvs->rx_control_frames = port_stats->rx_control_frames; ++ drvs->rx_in_range_errors = port_stats->rx_in_range_errors; ++ drvs->rx_frame_too_long = port_stats->rx_frame_too_long; ++ drvs->rx_dropped_runt = port_stats->rx_dropped_runt; ++ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; ++ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; ++ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; ++ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; ++ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; ++ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; ++ drvs->rx_out_range_errors = port_stats->rx_out_range_errors; ++ drvs->rx_dropped_header_too_small = ++ port_stats->rx_dropped_header_too_small; ++ drvs->rx_input_fifo_overflow_drop = ++ port_stats->rx_input_fifo_overflow_drop; ++ drvs->rx_address_match_errors = ++ port_stats->rx_address_match_errors; ++ drvs->rx_alignment_symbol_errors = ++ port_stats->rx_alignment_symbol_errors; ++ drvs->rxpp_fifo_overflow_drop = ++ port_stats->rxpp_fifo_overflow_drop; ++ drvs->tx_pauseframes = port_stats->tx_pauseframes; ++ drvs->tx_controlframes = port_stats->tx_controlframes; ++ drvs->jabber_events = port_stats->jabber_events; ++ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; ++ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb; ++ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; ++ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring; ++ drvs->forwarded_packets = rxf_stats->forwarded_packets; ++ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; ++ drvs->rx_drops_no_tpre_descr = ++ rxf_stats->rx_drops_no_tpre_descr; ++ drvs->rx_drops_too_many_frags = ++ rxf_stats->rx_drops_too_many_frags; ++ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; ++} ++ ++ ++static void accumulate_16bit_val(u32 *acc, u16 val) ++{ ++#define lo(x) 
(x & 0xFFFF) ++#define hi(x) (x & 0xFFFF0000) ++ bool wrapped = val < lo(*acc); ++ u32 newacc = hi(*acc) + val; ++ ++ if (wrapped) ++ newacc += 65536; ++ ACCESS_ONCE_RW(*acc) = newacc; ++} ++ ++void be_parse_stats(struct be_adapter *adapter) ++{ ++ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter); ++ struct be_rx_obj *rxo; ++ int i; ++ ++ if (adapter->generation == BE_GEN3) { ++ populate_be3_stats(adapter); ++ } else { ++ populate_be2_stats(adapter); ++ } ++ ++ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */ ++ for_all_rx_queues(adapter, rxo, i) { ++ /* below erx HW counter can actually wrap around after ++ * 65535. Driver accumulates a 32-bit value ++ */ ++ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, ++ (u16)erx->rx_drops_no_fragments[rxo->q.id]); ++ } + } + + void netdev_stats_update(struct be_adapter *adapter) + { +- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va); +- struct be_rxf_stats *rxf_stats = &hw_stats->rxf; +- struct be_port_rxf_stats *port_stats = +- &rxf_stats->port[adapter->port_num]; +- struct net_device_stats *dev_stats = &adapter->stats.net_stats; +- struct be_erx_stats *erx_stats = &hw_stats->erx; ++ struct be_drv_stats *drvs = &adapter->drv_stats; ++ struct net_device_stats *dev_stats = &adapter->net_stats; ++ struct be_rx_obj *rxo; ++ struct be_tx_obj *txo; ++ unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0; ++ int i; + +- dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts; +- dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; +- dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; +- dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; ++ for_all_rx_queues(adapter, rxo, i) { ++ pkts += rx_stats(rxo)->rx_pkts; ++ bytes += rx_stats(rxo)->rx_bytes; ++ mcast += rx_stats(rxo)->rx_mcast_pkts; ++ drops += rx_stats(rxo)->rx_drops_no_frags; ++ } ++ dev_stats->rx_packets = pkts; ++ dev_stats->rx_bytes = bytes; ++ dev_stats->multicast = mcast; ++ dev_stats->rx_dropped = drops; ++ ++ pkts = bytes = 0; ++ for_all_tx_queues(adapter, txo, i) { ++ pkts += tx_stats(txo)->be_tx_pkts; ++ bytes += tx_stats(txo)->be_tx_bytes; ++ } ++ dev_stats->tx_packets = pkts; ++ dev_stats->tx_bytes = bytes; + + /* bad pkts received */ +- dev_stats->rx_errors = port_stats->rx_crc_errors + +- port_stats->rx_alignment_symbol_errors + +- port_stats->rx_in_range_errors + +- port_stats->rx_out_range_errors + +- port_stats->rx_frame_too_long + +- port_stats->rx_dropped_too_small + +- port_stats->rx_dropped_too_short + +- port_stats->rx_dropped_header_too_small + +- port_stats->rx_dropped_tcp_length + +- port_stats->rx_dropped_runt + +- port_stats->rx_tcp_checksum_errs + +- port_stats->rx_ip_checksum_errs + +- port_stats->rx_udp_checksum_errs; +- +- /* no space in linux buffers: best possible approximation */ +- dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0]; ++ dev_stats->rx_errors = drvs->rx_crc_errors + ++ drvs->rx_alignment_symbol_errors + ++ drvs->rx_in_range_errors + ++ drvs->rx_out_range_errors + ++ drvs->rx_frame_too_long + ++ drvs->rx_dropped_too_small + ++ drvs->rx_dropped_too_short + ++ drvs->rx_dropped_header_too_small + ++ drvs->rx_dropped_tcp_length + ++ drvs->rx_dropped_runt + ++ drvs->rx_tcp_checksum_errs + ++ drvs->rx_ip_checksum_errs + ++ drvs->rx_udp_checksum_errs; + + /* detailed rx errors */ +- dev_stats->rx_length_errors = port_stats->rx_in_range_errors + +- port_stats->rx_out_range_errors + +- port_stats->rx_frame_too_long; ++ dev_stats->rx_length_errors = drvs->rx_in_range_errors + 
++ drvs->rx_out_range_errors + ++ drvs->rx_frame_too_long; + +- /* receive ring buffer overflow */ +- dev_stats->rx_over_errors = 0; +- +- dev_stats->rx_crc_errors = port_stats->rx_crc_errors; ++ dev_stats->rx_crc_errors = drvs->rx_crc_errors; + + /* frame alignment errors */ +- dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors; ++ dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors; + + /* receiver fifo overrun */ + /* drops_no_pbuf is no per i/f, it's per BE card */ +- dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow + +- port_stats->rx_input_fifo_overflow + +- rxf_stats->rx_drops_no_pbuf; +- /* receiver missed packetd */ +- dev_stats->rx_missed_errors = 0; +- +- /* packet transmit problems */ +- dev_stats->tx_errors = 0; +- +- /* no space available in linux */ +- dev_stats->tx_dropped = 0; +- +- dev_stats->multicast = port_stats->rx_multicast_frames; +- dev_stats->collisions = 0; +- +- /* detailed tx_errors */ +- dev_stats->tx_aborted_errors = 0; +- dev_stats->tx_carrier_errors = 0; +- dev_stats->tx_fifo_errors = 0; +- dev_stats->tx_heartbeat_errors = 0; +- dev_stats->tx_window_errors = 0; ++ dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + ++ drvs->rx_input_fifo_overflow_drop + ++ drvs->rx_drops_no_pbuf; + } + +-void be_link_status_update(struct be_adapter *adapter, bool link_up) ++void be_link_status_update(struct be_adapter *adapter, int link_status) + { + struct net_device *netdev = adapter->netdev; + + /* If link came up or went down */ +- if (adapter->link_up != link_up) { +- if (link_up) { ++ if (adapter->link_status != link_status) { ++ adapter->link_speed = -1; ++ if (link_status == LINK_UP) { + netif_start_queue(netdev); + netif_carrier_on(netdev); + printk(KERN_INFO "%s: Link up\n", netdev->name); +@@ -222,15 +491,15 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up) + netif_carrier_off(netdev); + printk(KERN_INFO "%s: Link down\n", netdev->name); + } +- adapter->link_up = link_up; ++ adapter->link_status = link_status; + } + } + + /* Update the EQ delay n BE based on the RX frags consumed / sec */ +-static void be_rx_eqd_update(struct be_adapter *adapter) ++static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo) + { +- struct be_eq_obj *rx_eq = &adapter->rx_eq; +- struct be_drvr_stats *stats = &adapter->stats.drvr_stats; ++ struct be_eq_obj *rx_eq = &rxo->rx_eq; ++ struct be_rx_stats *stats = &rxo->stats; + ulong now = jiffies; + u32 eqd; + +@@ -247,19 +516,17 @@ static void be_rx_eqd_update(struct be_adapter *adapter) + if ((now - stats->rx_fps_jiffies) < HZ) + return; + +- stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) / ++ stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) / + ((now - stats->rx_fps_jiffies) / HZ); + + stats->rx_fps_jiffies = now; +- stats->be_prev_rx_frags = stats->be_rx_frags; +- eqd = stats->be_rx_fps / 110000; ++ stats->prev_rx_frags = stats->rx_frags; ++ eqd = stats->rx_fps / 110000; + eqd = eqd << 3; + if (eqd > rx_eq->max_eqd) + eqd = rx_eq->max_eqd; + if (eqd < rx_eq->min_eqd) + eqd = rx_eq->min_eqd; +- if (eqd < 10) +- eqd = 0; + if (eqd != rx_eq->cur_eqd) + be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd); + +@@ -270,7 +537,7 @@ static struct net_device_stats *be_get_stats(struct net_device *dev) + { + struct be_adapter *adapter = netdev_priv(dev); + +- return &adapter->stats.net_stats; ++ return &adapter->net_stats; + } + + static u32 be_calc_rate(u64 bytes, unsigned long ticks) +@@ -284,9 +551,9 @@ static u32 be_calc_rate(u64 bytes, 
unsigned long ticks) + return rate; + } + +-static void be_tx_rate_update(struct be_adapter *adapter) ++static void be_tx_rate_update(struct be_tx_obj *txo) + { +- struct be_drvr_stats *stats = drvr_stats(adapter); ++ struct be_tx_stats *stats = tx_stats(txo); + ulong now = jiffies; + + /* Wrapped around? */ +@@ -305,10 +572,11 @@ static void be_tx_rate_update(struct be_adapter *adapter) + } + } + +-static void be_tx_stats_update(struct be_adapter *adapter, ++static void be_tx_stats_update(struct be_tx_obj *txo, + u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) + { +- struct be_drvr_stats *stats = drvr_stats(adapter); ++ struct be_tx_stats *stats = tx_stats(txo); ++ + stats->be_tx_reqs++; + stats->be_tx_wrbs += wrb_cnt; + stats->be_tx_bytes += copied; +@@ -318,7 +586,8 @@ static void be_tx_stats_update(struct be_adapter *adapter, + } + + /* Determine number of WRB entries needed to xmit data in an skb */ +-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) ++static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, ++ bool *dummy) + { + int cnt = (skb->len > skb->data_len); + +@@ -326,12 +595,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) + + /* to account for hdr wrb */ + cnt++; +- if (cnt & 1) { ++ if (lancer_chip(adapter) || !(cnt & 1)) { ++ *dummy = false; ++ } else { + /* add a dummy to make it an even num */ + cnt++; + *dummy = true; +- } else +- *dummy = false; ++ } + BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); + return cnt; + } +@@ -343,17 +613,31 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) + wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; + } + +-static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, +- bool vlan, u32 wrb_cnt, u32 len) ++static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, ++ struct sk_buff *skb, u32 wrb_cnt, u32 len) + { ++ u16 vlan_tag = 0; ++ + memset(hdr, 0, sizeof(*hdr)); + + AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); + +- if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) { ++ if (skb_is_gso(skb)) { + AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); + AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, + hdr, skb_shinfo(skb)->gso_size); ++ if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) ++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); ++ ++ if (lancer_A0_chip(adapter)) { ++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1); ++ if (is_tcp_pkt(skb)) ++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ++ tcpcs, hdr, 1); ++ else if (is_udp_pkt(skb)) ++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ++ udpcs, hdr, 1); ++ } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (is_tcp_pkt(skb)) + AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); +@@ -361,10 +645,10 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, + AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); + } + +- if (vlan && vlan_tx_tag_present(skb)) { ++ if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { + AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); +- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, +- hdr, vlan_tx_tag_get(skb)); ++ vlan_tag = be_get_tx_vlan_tag(adapter, skb); ++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag); + } + + AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1); +@@ -374,14 +658,13 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, + } + + +-static int make_tx_wrbs(struct be_adapter *adapter, ++static int make_tx_wrbs(struct be_adapter *adapter, 
struct be_queue_info *txq, + struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb) + { +- u64 busaddr; +- u32 i, copied = 0; ++ dma_addr_t busaddr; ++ int i, copied = 0; + struct pci_dev *pdev = adapter->pdev; + struct sk_buff *first_skb = skb; +- struct be_queue_info *txq = &adapter->tx_obj.q; + struct be_eth_wrb *wrb; + struct be_eth_hdr_wrb *hdr; + +@@ -389,15 +672,11 @@ static int make_tx_wrbs(struct be_adapter *adapter, + atomic_add(wrb_cnt, &txq->used); + queue_head_inc(txq); + +- if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) { +- dev_err(&pdev->dev, "TX DMA mapping failed\n"); +- return 0; +- } +- + if (skb->len > skb->data_len) { +- int len = skb->len - skb->data_len; ++ int len = skb_headlen(skb); ++ busaddr = pci_map_single(pdev, skb->data, len, ++ PCI_DMA_TODEVICE); + wrb = queue_head_node(txq); +- busaddr = skb_shinfo(skb)->dma_head; + wrb_fill(wrb, busaddr, len); + be_dws_cpu_to_le(wrb, sizeof(*wrb)); + queue_head_inc(txq); +@@ -407,8 +686,9 @@ static int make_tx_wrbs(struct be_adapter *adapter, + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = + &skb_shinfo(skb)->frags[i]; +- +- busaddr = skb_shinfo(skb)->dma_maps[i]; ++ busaddr = pci_map_page(pdev, frag->page, ++ frag->page_offset, ++ frag->size, PCI_DMA_TODEVICE); + wrb = queue_head_node(txq); + wrb_fill(wrb, busaddr, frag->size); + be_dws_cpu_to_le(wrb, sizeof(*wrb)); +@@ -423,8 +703,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, + queue_head_inc(txq); + } + +- wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false, +- wrb_cnt, copied); ++ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied); + be_dws_cpu_to_le(hdr, sizeof(*hdr)); + + return copied; +@@ -434,19 +713,70 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, + struct net_device *netdev) + { + struct be_adapter *adapter = netdev_priv(netdev); +- struct be_tx_obj *tx_obj = &adapter->tx_obj; +- struct be_queue_info *txq = &tx_obj->q; ++ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; ++ struct be_queue_info *txq = &txo->q; + u32 wrb_cnt = 0, copied = 0; + u32 start = txq->head; + bool dummy_wrb, stopped = false; + +- wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb); ++ if (unlikely((skb_shinfo(skb)->gso_segs > 1) && ++ skb_shinfo(skb)->gso_size && is_ipv6_ext_hdr(skb))) { ++ tx_stats(txo)->be_ipv6_ext_hdr_tx_drop++; ++ goto tx_drop; ++ } + +- copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); ++ /* If the skb is a large pkt forwarded to this interface ++ * after being LRO'd on another interface, drop the pkt. ++ * HW cannot handle such pkts. LRO must be disabled when ++ * using the server as a router. ++ */ ++ if (!skb_is_gso(skb)) { ++ int eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? ++ VLAN_ETH_HLEN : ETH_HLEN; ++ ++ if ((skb->len - eth_hdr_len) > adapter->netdev->mtu) ++ goto tx_drop; ++ } ++ ++ /* The ASIC is calculating checksum for Vlan tagged pkts ++ * though CSO is disabled. ++ * To work around this, insert the Vlan tag in the driver ++ * and do not set the vlan bit, cso bit in the Tx WRB. 
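++ * ++ * An illustrative case (hedged; the frame size is an example, not ++ * from the original comment): a 42-byte ARP reply queued on a vlan ++ * device has skb->len <= 60 and no CHECKSUM_PARTIAL, so it takes ++ * this path and the tag is inserted in software below via ++ *	skb = be_vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb)); ++ *	be_reset_skb_tx_vlan(skb); ++ * before the WRBs are built, with neither the vlan nor the cso bit ++ * set in the header WRB.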
++ */ ++ if (unlikely(vlan_tx_tag_present(skb) && ++ ((skb->ip_summed != CHECKSUM_PARTIAL) || (skb->len <= 60)))) { ++ /* Bug 28694: Don't embed the host VLAN tag in SKB ++ * when UMC mode enabled on that interface ++ */ ++ if (!(adapter->function_mode & UMC_ENABLED)) { ++ skb = skb_share_check(skb, GFP_ATOMIC); ++ if (unlikely(!skb)) ++ goto tx_drop; ++ ++ skb = be_vlan_put_tag(skb, ++ be_get_tx_vlan_tag(adapter, skb)); ++ if (unlikely(!skb)) ++ goto tx_drop; ++ ++ be_reset_skb_tx_vlan(skb); ++ } ++ } ++ ++ /* Bug 12422: the stack can send us skbs with length more than 65535 ++ * BE cannot handle such requests. Hack the extra data out and drop it. ++ */ ++ if (skb->len > 65535) { ++ int err = __pskb_trim(skb, 65535); ++ BUG_ON(err); ++ } ++ ++ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); ++ ++ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); + if (copied) { + /* record the sent skb in the sent_skb table */ +- BUG_ON(tx_obj->sent_skb_list[start]); +- tx_obj->sent_skb_list[start] = skb; ++ BUG_ON(txo->sent_skb_list[start]); ++ txo->sent_skb_list[start] = skb; + + /* Ensure txq has space for the next skb; Else stop the queue + * *BEFORE* ringing the tx doorbell, so that we serialze the +@@ -454,16 +784,21 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, + */ + if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= + txq->len) { +- netif_stop_queue(netdev); ++ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb)); + stopped = true; + } + + be_txq_notify(adapter, txq->id, wrb_cnt); + +- be_tx_stats_update(adapter, wrb_cnt, copied, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) ++ netdev->trans_start = jiffies; ++#endif ++ ++ be_tx_stats_update(txo, wrb_cnt, copied, + skb_shinfo(skb)->gso_segs, stopped); + } else { + txq->head = start; ++tx_drop: + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; +@@ -473,10 +808,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu) + { + struct be_adapter *adapter = netdev_priv(netdev); + if (new_mtu < BE_MIN_MTU || +- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) { ++ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - ++ (ETH_HLEN + ETH_FCS_LEN))) { + dev_info(&adapter->pdev->dev, + "MTU must be between %d and %d bytes\n", +- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE); ++ BE_MIN_MTU, ++ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); + return -EINVAL; + } + dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", +@@ -486,17 +823,19 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu) + } + + /* +- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured, +- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured, +- * set the BE in promiscuous VLAN mode. ++ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. ++ * If the user configures more, place BE in vlan promiscuous mode. 
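++ * ++ * For example (an illustration of the code below, not new behavior): ++ * each vlan added while within the limit reprograms the on-chip ++ * table with ++ *	be_cmd_vlan_config(adapter, adapter->if_handle, vtag, ntags, 1, 0); ++ * while the add that crosses the limit instead issues ++ *	be_cmd_vlan_config(adapter, adapter->if_handle, NULL, 0, 1, 1); ++ * i.e. vlan promiscuity, leaving vlan filtering to the stack.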
+ */ +-static int be_vid_config(struct be_adapter *adapter) ++static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) + { + u16 vtag[BE_NUM_VLANS_SUPPORTED]; + u16 ntags = 0, i; +- int status; ++ int status = 0; + +- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) { ++ /* No need to change the VLAN state if the I/F is in promiscuous */ ++ if (adapter->promiscuous) ++ return 0; ++ if (adapter->vlans_added <= adapter->max_vlans) { + /* Construct VLAN Table to give to HW */ + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + if (adapter->vlan_tag[i]) { +@@ -504,47 +843,46 @@ static int be_vid_config(struct be_adapter *adapter) + ntags++; + } + } +- status = be_cmd_vlan_config(adapter, adapter->if_handle, +- vtag, ntags, 1, 0); ++ /* Send command only if there is something to be programmed */ ++ if (ntags) ++ status = be_cmd_vlan_config(adapter, adapter->if_handle, ++ vtag, ntags, 1, 0); + } else { + status = be_cmd_vlan_config(adapter, adapter->if_handle, +- NULL, 0, 1, 1); ++ NULL, 0, 1, 1); + } ++ + return status; + } + + static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp) + { + struct be_adapter *adapter = netdev_priv(netdev); +- struct be_eq_obj *rx_eq = &adapter->rx_eq; +- struct be_eq_obj *tx_eq = &adapter->tx_eq; + +- be_eq_notify(adapter, rx_eq->q.id, false, false, 0); +- be_eq_notify(adapter, tx_eq->q.id, false, false, 0); + adapter->vlan_grp = grp; +- be_eq_notify(adapter, rx_eq->q.id, true, false, 0); +- be_eq_notify(adapter, tx_eq->q.id, true, false, 0); + } + + static void be_vlan_add_vid(struct net_device *netdev, u16 vid) + { + struct be_adapter *adapter = netdev_priv(netdev); + +- adapter->num_vlans++; ++ adapter->vlans_added++; ++ + adapter->vlan_tag[vid] = 1; +- +- be_vid_config(adapter); ++ if (adapter->vlans_added <= (adapter->max_vlans + 1)) ++ be_vid_config(adapter, false, 0); + } + + static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) + { + struct be_adapter *adapter = netdev_priv(netdev); + +- adapter->num_vlans--; +- adapter->vlan_tag[vid] = 0; +- ++ adapter->vlans_added--; + vlan_group_set_device(adapter->vlan_grp, vid, NULL); +- be_vid_config(adapter); ++ ++ adapter->vlan_tag[vid] = 0; ++ if (adapter->vlans_added <= adapter->max_vlans) ++ be_vid_config(adapter, false, 0); + } + + static void be_set_multicast_list(struct net_device *netdev) +@@ -552,7 +890,7 @@ static void be_set_multicast_list(struct net_device *netdev) + struct be_adapter *adapter = netdev_priv(netdev); + + if (netdev->flags & IFF_PROMISC) { +- be_cmd_promiscuous_config(adapter, adapter->port_num, 1); ++ be_cmd_rx_filter(adapter, IFF_PROMISC, ON); + adapter->promiscuous = true; + goto done; + } +@@ -560,81 +898,244 @@ static void be_set_multicast_list(struct net_device *netdev) + /* BE was previously in promiscous mode; disable it */ + if (adapter->promiscuous) { + adapter->promiscuous = false; +- be_cmd_promiscuous_config(adapter, adapter->port_num, 0); ++ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); ++ ++ if (adapter->vlans_added) ++ be_vid_config(adapter, false, 0); + } + +- if (netdev->flags & IFF_ALLMULTI) { +- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0); ++ /* Enable multicast promisc if num configured exceeds what we support */ ++ if (netdev->flags & IFF_ALLMULTI || ++ netdev_mc_count(netdev) > BE_MAX_MC) { ++ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); + goto done; + } + +- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list, +- netdev->mc_count); ++ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); + done: + 
+ return;
+ }
+
+-static void be_rx_rate_update(struct be_adapter *adapter)
++#ifdef HAVE_SRIOV_CONFIG
++static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+ {
+- struct be_drvr_stats *stats = drvr_stats(adapter);
++ struct be_adapter *adapter = netdev_priv(netdev);
++ int status;
++
++ if (adapter->num_vfs == 0)
++ return -EPERM;
++
++ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
++ return -EINVAL;
++
++ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
++ status = be_cmd_pmac_del(adapter,
++ adapter->vf_cfg[vf].vf_if_handle,
++ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
++
++ status = be_cmd_pmac_add(adapter, mac,
++ adapter->vf_cfg[vf].vf_if_handle,
++ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
++
++ if (status)
++ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
++ mac, vf);
++ else
++ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
++
++ return status;
++}
++
++static int be_get_vf_config(struct net_device *netdev, int vf,
++ struct ifla_vf_info *vi)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ if (adapter->num_vfs == 0)
++ return -EPERM;
++
++ if (vf >= adapter->num_vfs)
++ return -EINVAL;
++
++ vi->vf = vf;
++ vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
++ vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag & VLAN_VID_MASK;
++ vi->qos = adapter->vf_cfg[vf].vf_vlan_tag >> VLAN_PRIO_SHIFT;
++ memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
++
++ return 0;
++}
++
++/*
++ * Entry point to configure vlan behavior for a VF.
++ * 1. By default a VF is vlan challenged.
++ * 2. It may or may not have Transparent Tagging enabled.
++ * 3. Vlan privilege for a VF can be toggled using special VID 4095.
++ * 4. When removing the Vlan privilege for a VF there is no need to set a default vid
++ * 5. Transparent Tagging configured for a VF resets its Vlan privilege
++ * 6. To disable the current Transparent Tagging for a VF:
++ * 6a. run the last iproute command with vlan set to 0.
++ * 6b. programming the default vid will disable Transparent Tagging in ARM/ASIC
++ */
++static int be_set_vf_vlan(struct net_device *netdev,
++ int vf, u16 vlan, u8 qos)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ int status = 0;
++ u32 en = 0;
++
++ if (adapter->num_vfs == 0)
++ return -EPERM;
++
++ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
++ return -EINVAL;
++
++ status = be_cmd_get_fn_privileges(adapter, &en, vf + 1);
++ if (status)
++ goto sts;
++
++ if (vlan == 4095) {
++ if (en & BE_PRIV_FILTMGMT) {
++ /* Knock off filtering privileges */
++ en &= ~BE_PRIV_FILTMGMT;
++ } else {
++ en |= BE_PRIV_FILTMGMT;
++ /* Transparent Tagging is currently enabled; reset it */
++ if (adapter->vf_cfg[vf].vf_vlan_tag) {
++ adapter->vf_cfg[vf].vf_vlan_tag = 0;
++ vlan = adapter->vf_cfg[vf].vf_def_vid;
++ be_cmd_set_hsw_config(adapter, vlan, vf + 1,
++ adapter->vf_cfg[vf].vf_if_handle);
++ }
++ }
++
++ adapter->vf_cfg[vf].vf_vlan_tag = 0;
++ status = be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
++
++ goto sts;
++ }
++
++ if (vlan || qos) {
++ if (en & BE_PRIV_FILTMGMT) {
++ /* Check privilege and reset it to default */
++ en &= ~BE_PRIV_FILTMGMT;
++ be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
++ }
++
++ vlan |= qos << VLAN_PRIO_SHIFT;
++ if (adapter->vf_cfg[vf].vf_vlan_tag != vlan) {
++ /* If this is a new value, program it. Else skip.
*/ ++ adapter->vf_cfg[vf].vf_vlan_tag = vlan; ++ ++ status = be_cmd_set_hsw_config(adapter, vlan, ++ vf + 1, adapter->vf_cfg[vf].vf_if_handle); ++ } ++ ++ } else { ++ /* Reset Transparent Vlan Tagging. */ ++ adapter->vf_cfg[vf].vf_vlan_tag = 0; ++ vlan = adapter->vf_cfg[vf].vf_def_vid; ++ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, ++ adapter->vf_cfg[vf].vf_if_handle); ++ } ++ ++sts: ++ if (status) ++ dev_info(&adapter->pdev->dev, ++ "VLAN %d config on VF %d failed\n", vlan, vf); ++ return status; ++} ++ ++static int be_set_vf_tx_rate(struct net_device *netdev, ++ int vf, int rate) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ int status = 0; ++ ++ if (adapter->num_vfs == 0) ++ return -EPERM; ++ ++ if ((vf >= adapter->num_vfs) || (rate < 0)) ++ return -EINVAL; ++ ++ if (rate > 10000) ++ rate = 10000; ++ ++ adapter->vf_cfg[vf].vf_tx_rate = rate; ++ status = be_cmd_set_qos(adapter, rate / 10, vf + 1); ++ ++ if (status) ++ dev_info(&adapter->pdev->dev, ++ "tx rate %d on VF %d failed\n", rate, vf); ++ return status; ++} ++#endif /* HAVE_SRIOV_CONFIG */ ++ ++static void be_rx_rate_update(struct be_rx_obj *rxo) ++{ ++ struct be_rx_stats *stats = &rxo->stats; + ulong now = jiffies; + + /* Wrapped around */ +- if (time_before(now, stats->be_rx_jiffies)) { +- stats->be_rx_jiffies = now; ++ if (time_before(now, stats->rx_jiffies)) { ++ stats->rx_jiffies = now; + return; + } + + /* Update the rate once in two seconds */ +- if ((now - stats->be_rx_jiffies) < 2 * HZ) ++ if ((now - stats->rx_jiffies) < 2 * HZ) + return; + +- stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes +- - stats->be_rx_bytes_prev, +- now - stats->be_rx_jiffies); +- stats->be_rx_jiffies = now; +- stats->be_rx_bytes_prev = stats->be_rx_bytes; ++ stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev, ++ now - stats->rx_jiffies); ++ stats->rx_jiffies = now; ++ stats->rx_bytes_prev = stats->rx_bytes; + } + +-static void be_rx_stats_update(struct be_adapter *adapter, +- u32 pktsize, u16 numfrags) ++static void be_rx_stats_update(struct be_rx_obj *rxo, ++ struct be_rx_compl_info *rxcp) + { +- struct be_drvr_stats *stats = drvr_stats(adapter); ++ struct be_rx_stats *stats = &rxo->stats; + +- stats->be_rx_compl++; +- stats->be_rx_frags += numfrags; +- stats->be_rx_bytes += pktsize; +- stats->be_rx_pkts++; ++ stats->rx_compl++; ++ stats->rx_frags += rxcp->num_rcvd; ++ stats->rx_bytes += rxcp->pkt_size; ++ stats->rx_pkts++; ++ if (rxcp->pkt_type == BE_MULTICAST_PACKET) ++ stats->rx_mcast_pkts++; ++ if (rxcp->err) ++ stats->rxcp_err++; + } + +-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) ++static inline bool csum_passed(struct be_rx_compl_info *rxcp) + { +- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk; +- +- l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); +- ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp); +- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp); +- if (ip_version) { +- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); +- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp); +- } +- ipv6_chk = (ip_version && (tcpf || udpf)); +- +- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true; ++ /* L4 checksum is not reliable for non TCP/UDP packets. 
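++ * In boolean form the check below reduces to
++ * pass = (tcpf || udpf) && l4_csum && (ip_csum || ipv6)
++ * i.e. only TCP/UDP frames whose L4 checksum verified are trusted,
++ * and the IP checksum matters only for IPv4, since IPv6 headers
++ * carry no checksum of their own.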
++ * Also ignore ipcksm for ipv6 pkts */
++ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
++ (rxcp->ip_csum || rxcp->ipv6);
+ }
+
+ static struct be_rx_page_info *
+-get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
++get_rx_page_info(struct be_adapter *adapter, struct be_rx_obj *rxo,
++ u16 frag_idx)
+ {
+ struct be_rx_page_info *rx_page_info;
+- struct be_queue_info *rxq = &adapter->rx_obj.q;
++ struct be_queue_info *rxq = &rxo->q;
+
+- rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
+- BUG_ON(!rx_page_info->page);
++ rx_page_info = &rxo->page_info_tbl[frag_idx];
++ if (!rx_page_info->page) {
++ printk(KERN_EMERG "curr_idx=%d prev_idx=%d rxq->head=%d\n",
++ frag_idx, rxo->prev_frag_idx, rxq->head);
++ BUG_ON(!rx_page_info->page);
++ }
+
+- if (rx_page_info->last_page_user)
++ if (rx_page_info->last_page_user) {
+ pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
+ adapter->big_page_size, PCI_DMA_FROMDEVICE);
++ rx_page_info->last_page_user = false;
++ }
++
++ rxo->prev_frag_idx = frag_idx;
+
+ atomic_dec(&rxq->used);
+ return rx_page_info;
+@@ -642,20 +1143,26 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
+
+ /* Throw away the data in the Rx completion */
+ static void be_rx_compl_discard(struct be_adapter *adapter,
+- struct be_eth_rx_compl *rxcp)
++ struct be_rx_obj *rxo,
++ struct be_rx_compl_info *rxcp)
+ {
+- struct be_queue_info *rxq = &adapter->rx_obj.q;
++ struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+- u16 rxq_idx, i, num_rcvd;
++ u16 i;
++ bool oob_error;
++ u16 num_rcvd = rxcp->num_rcvd;
+
+- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
+- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
++ oob_error = lancer_A0_chip(adapter) && rxcp->err;
++
++ /* In case of OOB error num_rcvd will be 1 more than actual */
++ if (oob_error && num_rcvd)
++ num_rcvd -= 1;
+
+ for (i = 0; i < num_rcvd; i++) {
+- page_info = get_rx_page_info(adapter, rxq_idx);
++ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+- index_inc(&rxq_idx, rxq->len);
++ index_inc(&rxcp->rxq_idx, rxq->len);
+ }
+ }
+
+@@ -663,29 +1170,24 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
+ * skb_fill_rx_data forms a complete skb for an ether frame
+ * indicated by rxcp.
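+ * The first fragment is split: up to BE_HDR_LEN bytes are copied into
+ * the linear skb area so the stack can parse headers without touching
+ * the pages, and the rest of that fragment plus all further fragments
+ * are attached as page frags; frags sharing a physical page are
+ * coalesced into a single skb_frag slot.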
+ */ +-static void skb_fill_rx_data(struct be_adapter *adapter, +- struct sk_buff *skb, struct be_eth_rx_compl *rxcp) ++static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, ++ struct sk_buff *skb, struct be_rx_compl_info *rxcp) + { +- struct be_queue_info *rxq = &adapter->rx_obj.q; ++ struct be_queue_info *rxq = &rxo->q; + struct be_rx_page_info *page_info; +- u16 rxq_idx, i, num_rcvd, j; +- u32 pktsize, hdr_len, curr_frag_len, size; ++ u16 i, j; ++ u16 hdr_len, curr_frag_len, remaining; + u8 *start; + +- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); +- pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); +- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); +- +- page_info = get_rx_page_info(adapter, rxq_idx); +- ++ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); + start = page_address(page_info->page) + page_info->page_offset; + prefetch(start); + + /* Copy data in the first descriptor of this completion */ +- curr_frag_len = min(pktsize, rx_frag_size); ++ curr_frag_len = min(rxcp->pkt_size, rx_frag_size); + + /* Copy the header portion into skb_data */ +- hdr_len = min((u32)BE_HDR_LEN, curr_frag_len); ++ hdr_len = min(BE_HDR_LEN, curr_frag_len); + memcpy(skb->data, start, hdr_len); + skb->len = curr_frag_len; + if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ +@@ -702,21 +1204,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter, + skb->data_len = curr_frag_len - hdr_len; + skb->tail += hdr_len; + } +- memset(page_info, 0, sizeof(*page_info)); ++ page_info->page = NULL; + +- if (pktsize <= rx_frag_size) { +- BUG_ON(num_rcvd != 1); +- goto done; ++ if (rxcp->pkt_size <= rx_frag_size) { ++ BUG_ON(rxcp->num_rcvd != 1); ++ return; + } + + /* More frags present for this completion */ +- size = pktsize; +- for (i = 1, j = 0; i < num_rcvd; i++) { +- size -= curr_frag_len; +- index_inc(&rxq_idx, rxq->len); +- page_info = get_rx_page_info(adapter, rxq_idx); +- +- curr_frag_len = min(size, rx_frag_size); ++ index_inc(&rxcp->rxq_idx, rxq->len); ++ remaining = rxcp->pkt_size - curr_frag_len; ++ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { ++ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); ++ curr_frag_len = min(remaining, rx_frag_size); + + /* Coalesce all frags from the same physical page in one slot */ + if (page_info->page_offset == 0) { +@@ -735,99 +1235,122 @@ static void skb_fill_rx_data(struct be_adapter *adapter, + skb->len += curr_frag_len; + skb->data_len += curr_frag_len; + +- memset(page_info, 0, sizeof(*page_info)); ++ remaining -= curr_frag_len; ++ index_inc(&rxcp->rxq_idx, rxq->len); ++ page_info->page = NULL; + } + BUG_ON(j > MAX_SKB_FRAGS); +- +-done: +- be_rx_stats_update(adapter, pktsize, num_rcvd); +- return; + } + +-/* Process the RX completion indicated by rxcp when GRO is disabled */ ++/* Process the RX completion indicated by rxcp when LRO is disabled */ + static void be_rx_compl_process(struct be_adapter *adapter, +- struct be_eth_rx_compl *rxcp) ++ struct be_rx_obj *rxo, ++ struct be_rx_compl_info *rxcp) + { + struct sk_buff *skb; +- u32 vlanf, vid; +- u8 vtm; + +- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); +- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); +- +- /* vlanf could be wrongly set in some cards. 
+- * ignore if vtm is not set */ +- if ((adapter->cap == 0x400) && !vtm) +- vlanf = 0; +- +- skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); +- if (!skb) { ++ skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); ++ if (unlikely(!skb)) { + if (net_ratelimit()) + dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); +- be_rx_compl_discard(adapter, rxcp); ++ be_rx_compl_discard(adapter, rxo, rxcp); + return; + } + +- skb_reserve(skb, NET_IP_ALIGN); ++ skb_fill_rx_data(adapter, rxo, skb, rxcp); + +- skb_fill_rx_data(adapter, skb, rxcp); +- +- if (do_pkt_csum(rxcp, adapter->rx_csum)) +- skb->ip_summed = CHECKSUM_NONE; +- else ++ if (likely(adapter->rx_csum && csum_passed(rxcp))) + skb->ip_summed = CHECKSUM_UNNECESSARY; ++ else ++ skb->ip_summed = CHECKSUM_NONE; + + skb->truesize = skb->len + sizeof(struct sk_buff); ++ if (unlikely(rxcp->vlanf) && ++ unlikely(!vlan_configured(adapter))) { ++ __vlan_put_tag(skb, rxcp->vlan_tag); ++ } + skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->dev = adapter->netdev; + +- if (vlanf) { +- if (!adapter->vlan_grp || adapter->num_vlans == 0) { +- kfree_skb(skb); +- return; +- } +- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); +- vid = be16_to_cpu(vid); +- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); +- } else { ++ if (unlikely(rxcp->vlanf) && ++ vlan_configured(adapter)) ++ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, ++ rxcp->vlan_tag); ++ else + netif_receive_skb(skb); ++ ++ return; ++} ++ ++/* Process the RX completion indicated by rxcp when LRO is enabled */ ++static void be_rx_compl_process_lro(struct be_adapter *adapter, ++ struct be_rx_obj *rxo, ++ struct be_rx_compl_info *rxcp) ++{ ++ struct be_rx_page_info *page_info; ++ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; ++ struct be_queue_info *rxq = &rxo->q; ++ u16 remaining, curr_frag_len; ++ u16 i, j; ++ ++ remaining = rxcp->pkt_size; ++ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { ++ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); ++ ++ curr_frag_len = min(remaining, rx_frag_size); ++ ++ /* Coalesce all frags from the same physical page in one slot */ ++ if (i == 0 || page_info->page_offset == 0) { ++ /* First frag or Fresh page */ ++ j++; ++ rx_frags[j].page = page_info->page; ++ rx_frags[j].page_offset = page_info->page_offset; ++ rx_frags[j].size = 0; ++ } else { ++ put_page(page_info->page); ++ } ++ rx_frags[j].size += curr_frag_len; ++ ++ remaining -= curr_frag_len; ++ index_inc(&rxcp->rxq_idx, rxq->len); ++ memset(page_info, 0, sizeof(*page_info)); ++ } ++ BUG_ON(j > MAX_SKB_FRAGS); ++ ++ if (likely(!rxcp->vlanf)) { ++ lro_receive_frags(&rxo->lro_mgr, rx_frags, rxcp->pkt_size, ++ rxcp->pkt_size, NULL, 0); ++ } else { ++ lro_vlan_hwaccel_receive_frags(&rxo->lro_mgr, rx_frags, ++ rxcp->pkt_size, rxcp->pkt_size, adapter->vlan_grp, ++ rxcp->vlan_tag, NULL, 0); + } + + return; + } + + /* Process the RX completion indicated by rxcp when GRO is enabled */ +-static void be_rx_compl_process_gro(struct be_adapter *adapter, +- struct be_eth_rx_compl *rxcp) ++void be_rx_compl_process_gro(struct be_adapter *adapter, ++ struct be_rx_obj *rxo, ++ struct be_rx_compl_info *rxcp) + { ++#ifdef NETIF_F_GRO + struct be_rx_page_info *page_info; + struct sk_buff *skb = NULL; +- struct be_queue_info *rxq = &adapter->rx_obj.q; +- struct be_eq_obj *eq_obj = &adapter->rx_eq; +- u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; +- u16 i, rxq_idx = 0, vid, j; +- u8 vtm; +- +- num_rcvd = AMAP_GET_BITS(struct 
amap_eth_rx_compl, numfrags, rxcp); +- pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); +- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); +- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); +- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); +- +- /* vlanf could be wrongly set in some cards. +- * ignore if vtm is not set */ +- if ((adapter->cap == 0x400) && !vtm) +- vlanf = 0; ++ struct be_queue_info *rxq = &rxo->q; ++ struct be_eq_obj *eq_obj = &rxo->rx_eq; ++ u16 remaining, curr_frag_len; ++ u16 i, j; + + skb = napi_get_frags(&eq_obj->napi); + if (!skb) { +- be_rx_compl_discard(adapter, rxcp); ++ be_rx_compl_discard(adapter, rxo, rxcp); + return; + } + +- remaining = pkt_size; +- for (i = 0, j = -1; i < num_rcvd; i++) { +- page_info = get_rx_page_info(adapter, rxq_idx); ++ remaining = rxcp->pkt_size; ++ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { ++ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); + + curr_frag_len = min(remaining, rx_frag_size); + +@@ -845,55 +1368,129 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, + skb_shinfo(skb)->frags[j].size += curr_frag_len; + + remaining -= curr_frag_len; +- index_inc(&rxq_idx, rxq->len); ++ index_inc(&rxcp->rxq_idx, rxq->len); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(j > MAX_SKB_FRAGS); + + skb_shinfo(skb)->nr_frags = j + 1; +- skb->len = pkt_size; +- skb->data_len = pkt_size; +- skb->truesize += pkt_size; ++ skb->len = rxcp->pkt_size; ++ skb->data_len = rxcp->pkt_size; ++ skb->truesize += rxcp->pkt_size; + skb->ip_summed = CHECKSUM_UNNECESSARY; + +- if (likely(!vlanf)) { ++ if (likely(!rxcp->vlanf)) + napi_gro_frags(&eq_obj->napi); +- } else { +- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); +- vid = be16_to_cpu(vid); ++ else ++ vlan_gro_frags(&eq_obj->napi, ++ adapter->vlan_grp, rxcp->vlan_tag); ++#endif + +- if (!adapter->vlan_grp || adapter->num_vlans == 0) +- return; +- +- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); +- } +- +- be_rx_stats_update(adapter, pkt_size, num_rcvd); + return; + } + +-static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) ++static void be_parse_rx_compl_v1(struct be_adapter *adapter, ++ struct be_eth_rx_compl *compl, ++ struct be_rx_compl_info *rxcp) + { +- struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq); ++ rxcp->pkt_size = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl); ++ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl); ++ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl); ++ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl); ++ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl); ++ rxcp->ip_csum = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl); ++ rxcp->l4_csum = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl); ++ rxcp->ipv6 = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); ++ rxcp->rxq_idx = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl); ++ rxcp->num_rcvd = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); ++ rxcp->pkt_type = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); ++ if (rxcp->vlanf) { ++ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, ++ compl); ++ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ++ vlan_tag, compl); ++ } ++ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); ++} + +- if 
(rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) ++static void be_parse_rx_compl_v0(struct be_adapter *adapter, ++ struct be_eth_rx_compl *compl, ++ struct be_rx_compl_info *rxcp) ++{ ++ rxcp->pkt_size = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl); ++ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl); ++ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl); ++ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl); ++ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl); ++ rxcp->ip_csum = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl); ++ rxcp->l4_csum = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl); ++ rxcp->ipv6 = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); ++ rxcp->rxq_idx = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl); ++ rxcp->num_rcvd = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); ++ rxcp->pkt_type = ++ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); ++ if (rxcp->vlanf) { ++ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, ++ compl); ++ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ++ vlan_tag, compl); ++ } ++ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); ++} ++ ++static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) ++{ ++ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); ++ struct be_rx_compl_info *rxcp = &rxo->rxcp; ++ struct be_adapter *adapter = rxo->adapter; ++ ++ /* For checking the valid bit it is Ok to use either definition as the ++ * valid bit is at the same position in both v0 and v1 Rx compl */ ++ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0) + return NULL; + +- be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); ++ rmb(); ++ be_dws_le_to_cpu(compl, sizeof(*compl)); + +- queue_tail_inc(&adapter->rx_obj.cq); ++ if (adapter->be3_native) ++ be_parse_rx_compl_v1(adapter, compl, rxcp); ++ else ++ be_parse_rx_compl_v0(adapter, compl, rxcp); ++ ++ if (rxcp->vlanf) { ++ /* vlanf could be wrongly set in some cards. 
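++ * vtm is the confirming "tagged frame" bit: in FLEX10 function mode
++ * the code below trusts vlanf only when vtm agrees, and treats the
++ * frame as untagged otherwise. Hence the rule: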
++ * ignore if vtm is not set */ ++ if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm) ++ rxcp->vlanf = 0; ++ ++ if (!lancer_chip(adapter)) ++ rxcp->vlan_tag = swab16(rxcp->vlan_tag); ++ ++ if ((adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK)) && ++ !adapter->vlan_tag[rxcp->vlan_tag]) ++ rxcp->vlanf = 0; ++ } ++ ++ /* As the compl has been parsed, reset it; we wont touch it again */ ++ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0; ++ ++ queue_tail_inc(&rxo->cq); + return rxcp; + } + +-/* To reset the valid bit, we need to reset the whole word as +- * when walking the queue the valid entries are little-endian +- * and invalid entries are host endian +- */ +-static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp) +-{ +- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; +-} +- + static inline struct page *be_alloc_pages(u32 size) + { + gfp_t alloc_flags = GFP_ATOMIC; +@@ -907,11 +1504,12 @@ static inline struct page *be_alloc_pages(u32 size) + * Allocate a page, split it to fragments of size rx_frag_size and post as + * receive buffers to BE + */ +-static void be_post_rx_frags(struct be_adapter *adapter) ++static void be_post_rx_frags(struct be_rx_obj *rxo) + { +- struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; +- struct be_rx_page_info *page_info = NULL; +- struct be_queue_info *rxq = &adapter->rx_obj.q; ++ struct be_adapter *adapter = rxo->adapter; ++ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; ++ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; ++ struct be_queue_info *rxq = &rxo->q; + struct page *pagep = NULL; + struct be_eth_rx_d *rxd; + u64 page_dmaaddr = 0, frag_dmaaddr; +@@ -922,7 +1520,7 @@ static void be_post_rx_frags(struct be_adapter *adapter) + if (!pagep) { + pagep = be_alloc_pages(adapter->big_page_size); + if (unlikely(!pagep)) { +- drvr_stats(adapter)->be_ethrx_post_fail++; ++ rxo->stats.rx_post_fail++; + break; + } + page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, +@@ -941,7 +1539,6 @@ static void be_post_rx_frags(struct be_adapter *adapter) + rxd = queue_head_node(rxq); + rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); + rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); +- queue_head_inc(rxq); + + /* Any space left in the current big page for another frag? */ + if ((page_offset + rx_frag_size + rx_frag_size) > +@@ -949,17 +1546,24 @@ static void be_post_rx_frags(struct be_adapter *adapter) + pagep = NULL; + page_info->last_page_user = true; + } ++ ++ prev_page_info = page_info; ++ queue_head_inc(rxq); + page_info = &page_info_tbl[rxq->head]; + } + if (pagep) +- page_info->last_page_user = true; ++ prev_page_info->last_page_user = true; + ++ /* Ensure that posting buffers is the last thing done by this ++ * routine to avoid racing between rx bottom-half and ++ * be_worker (process) contexts. 
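++ * (be_rxq_notify() publishes the new descriptors to the adapter and
++ * completions can fire as soon as it executes, so every page_info_tbl
++ * entry must already be consistent by then; notifying last acts as
++ * the producer's release point.)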
++ */ + if (posted) { + atomic_add(posted, &rxq->used); + be_rxq_notify(adapter, rxq->id, posted); + } else if (atomic_read(&rxq->used) == 0) { + /* Let be_worker replenish when memory is available */ +- adapter->rx_post_starved = true; ++ rxo->rx_post_starved = true; + } + + return; +@@ -972,6 +1576,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) + if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) + return NULL; + ++ rmb(); + be_dws_le_to_cpu(txcp, sizeof(*txcp)); + + txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; +@@ -980,11 +1585,14 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) + return txcp; + } + +-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) ++static u16 be_tx_compl_process(struct be_adapter *adapter, ++ struct be_tx_obj *txo, u16 last_index) + { +- struct be_queue_info *txq = &adapter->tx_obj.q; +- struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; ++ struct be_queue_info *txq = &txo->q; ++ struct be_eth_wrb *wrb; ++ struct sk_buff **sent_skbs = txo->sent_skb_list; + struct sk_buff *sent_skb; ++ u64 busaddr; + u16 cur_index, num_wrbs = 0; + + cur_index = txq->tail; +@@ -992,15 +1600,31 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) + BUG_ON(!sent_skb); + sent_skbs[cur_index] = NULL; + +- do { ++ wrb = queue_tail_node(txq); ++ be_dws_le_to_cpu(wrb, sizeof(*wrb)); ++ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; ++ if (busaddr != 0) { ++ pci_unmap_single(adapter->pdev, busaddr, ++ wrb->frag_len, PCI_DMA_TODEVICE); ++ } ++ num_wrbs++; ++ queue_tail_inc(txq); ++ ++ while (cur_index != last_index) { + cur_index = txq->tail; ++ wrb = queue_tail_node(txq); ++ be_dws_le_to_cpu(wrb, sizeof(*wrb)); ++ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; ++ if (busaddr != 0) { ++ pci_unmap_page(adapter->pdev, busaddr, ++ wrb->frag_len, PCI_DMA_TODEVICE); ++ } + num_wrbs++; + queue_tail_inc(txq); +- } while (cur_index != last_index); ++ } + +- atomic_sub(num_wrbs, &txq->used); +- skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE); + kfree_skb(sent_skb); ++ return num_wrbs; + } + + static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj) +@@ -1010,13 +1634,15 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj) + if (!eqe->evt) + return NULL; + ++ rmb(); + eqe->evt = le32_to_cpu(eqe->evt); + queue_tail_inc(&eq_obj->q); + return eqe; + } + + static int event_handle(struct be_adapter *adapter, +- struct be_eq_obj *eq_obj) ++ struct be_eq_obj *eq_obj, ++ bool rearm) + { + struct be_eq_entry *eqe; + u16 num = 0; +@@ -1029,7 +1655,10 @@ static int event_handle(struct be_adapter *adapter, + /* Deal with any spurious interrupts that come + * without events + */ +- be_eq_notify(adapter, eq_obj->q.id, true, true, num); ++ if (!num) ++ rearm = true; ++ ++ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num); + if (num) + napi_schedule(&eq_obj->napi); + +@@ -1053,49 +1682,55 @@ static void be_eq_clean(struct be_adapter *adapter, + be_eq_notify(adapter, eq_obj->q.id, false, true, num); + } + +-static void be_rx_q_clean(struct be_adapter *adapter) ++static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo) + { + struct be_rx_page_info *page_info; +- struct be_queue_info *rxq = &adapter->rx_obj.q; +- struct be_queue_info *rx_cq = &adapter->rx_obj.cq; +- struct be_eth_rx_compl *rxcp; ++ struct be_queue_info *rxq = &rxo->q; ++ struct be_queue_info *rx_cq = 
&rxo->cq; ++ struct be_rx_compl_info *rxcp; + u16 tail; + + /* First cleanup pending rx completions */ +- while ((rxcp = be_rx_compl_get(adapter)) != NULL) { +- be_rx_compl_discard(adapter, rxcp); +- be_rx_compl_reset(rxcp); ++ while ((rxcp = be_rx_compl_get(rxo)) != NULL) { ++ be_rx_compl_discard(adapter, rxo, rxcp); + be_cq_notify(adapter, rx_cq->id, true, 1); + } + + /* Then free posted rx buffer that were not used */ + tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; + for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { +- page_info = get_rx_page_info(adapter, tail); ++ page_info = get_rx_page_info(adapter, rxo, tail); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(atomic_read(&rxq->used)); ++ rxq->tail = rxq->head = 0; + } + +-static void be_tx_compl_clean(struct be_adapter *adapter) ++static void be_tx_compl_clean(struct be_adapter *adapter, ++ struct be_tx_obj *txo) + { +- struct be_queue_info *tx_cq = &adapter->tx_obj.cq; +- struct be_queue_info *txq = &adapter->tx_obj.q; ++ struct be_queue_info *tx_cq = &txo->cq; ++ struct be_queue_info *txq = &txo->q; + struct be_eth_tx_compl *txcp; +- u16 end_idx, cmpl = 0, timeo = 0; ++ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0; ++ struct sk_buff **sent_skbs = txo->sent_skb_list; ++ struct sk_buff *sent_skb; ++ bool dummy_wrb; + + /* Wait for a max of 200ms for all the tx-completions to arrive. */ + do { + while ((txcp = be_tx_compl_get(tx_cq))) { + end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, + wrb_index, txcp); +- be_tx_compl_process(adapter, end_idx); ++ num_wrbs += be_tx_compl_process(adapter, txo, end_idx); + cmpl++; + } + if (cmpl) { + be_cq_notify(adapter, tx_cq->id, false, cmpl); ++ atomic_sub(num_wrbs, &txq->used); + cmpl = 0; ++ num_wrbs = 0; + } + + if (atomic_read(&txq->used) == 0 || ++timeo > 200) +@@ -1107,6 +1742,17 @@ static void be_tx_compl_clean(struct be_adapter *adapter) + if (atomic_read(&txq->used)) + dev_err(&adapter->pdev->dev, "%d pending tx-completions\n", + atomic_read(&txq->used)); ++ ++ /* free posted tx for which compls will never arrive */ ++ while (atomic_read(&txq->used)) { ++ sent_skb = sent_skbs[txq->tail]; ++ end_idx = txq->tail; ++ index_adv(&end_idx, ++ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1, ++ txq->len); ++ num_wrbs = be_tx_compl_process(adapter, txo, end_idx); ++ atomic_sub(num_wrbs, &txq->used); ++ } + } + + static void be_mcc_queues_destroy(struct be_adapter *adapter) +@@ -1145,8 +1791,9 @@ static int be_mcc_queues_create(struct be_adapter *adapter) + goto mcc_cq_destroy; + + /* Ask BE to create MCC queue */ +- if (be_cmd_mccq_create(adapter, q, cq)) ++ if (be_cmd_mccq_create(adapter, q, cq)) { + goto mcc_q_free; ++ } + + return 0; + +@@ -1163,16 +1810,20 @@ err: + static void be_tx_queues_destroy(struct be_adapter *adapter) + { + struct be_queue_info *q; ++ struct be_tx_obj *txo; ++ u8 i; + +- q = &adapter->tx_obj.q; +- if (q->created) +- be_cmd_q_destroy(adapter, q, QTYPE_TXQ); +- be_queue_free(adapter, q); ++ for_all_tx_queues(adapter, txo, i) { ++ q = &txo->q; ++ if (q->created) ++ be_cmd_q_destroy(adapter, q, QTYPE_TXQ); ++ be_queue_free(adapter, q); + +- q = &adapter->tx_obj.cq; +- if (q->created) +- be_cmd_q_destroy(adapter, q, QTYPE_CQ); +- be_queue_free(adapter, q); ++ q = &txo->cq; ++ if (q->created) ++ be_cmd_q_destroy(adapter, q, QTYPE_CQ); ++ be_queue_free(adapter, q); ++ } + + /* Clear any residual events */ + be_eq_clean(adapter, &adapter->tx_eq); +@@ -1183,168 +1834,210 @@ static void 
be_tx_queues_destroy(struct be_adapter *adapter) + be_queue_free(adapter, q); + } + ++/* One TX event queue is shared by all TX compl qs */ + static int be_tx_queues_create(struct be_adapter *adapter) + { + struct be_queue_info *eq, *q, *cq; ++ struct be_tx_obj *txo; ++ u8 i, tc_id; + + adapter->tx_eq.max_eqd = 0; + adapter->tx_eq.min_eqd = 0; + adapter->tx_eq.cur_eqd = 96; + adapter->tx_eq.enable_aic = false; +- /* Alloc Tx Event queue */ ++ + eq = &adapter->tx_eq.q; +- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry))) ++ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, ++ sizeof(struct be_eq_entry))) + return -1; + +- /* Ask BE to create Tx Event queue */ + if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) +- goto tx_eq_free; +- /* Alloc TX eth compl queue */ +- cq = &adapter->tx_obj.cq; +- if (be_queue_alloc(adapter, cq, TX_CQ_LEN, ++ goto err; ++ adapter->tx_eq.eq_idx = adapter->eq_next_idx++; ++ ++ for_all_tx_queues(adapter, txo, i) { ++ cq = &txo->cq; ++ if (be_queue_alloc(adapter, cq, TX_CQ_LEN, + sizeof(struct be_eth_tx_compl))) +- goto tx_eq_destroy; ++ goto err; + +- /* Ask BE to create Tx eth compl queue */ +- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3)) +- goto tx_cq_free; ++ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3)) ++ goto err; + +- /* Alloc TX eth queue */ +- q = &adapter->tx_obj.q; +- if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb))) +- goto tx_cq_destroy; ++ q = &txo->q; ++ if (be_queue_alloc(adapter, q, TX_Q_LEN, ++ sizeof(struct be_eth_wrb))) ++ goto err; + +- /* Ask BE to create Tx eth queue */ +- if (be_cmd_txq_create(adapter, q, cq)) +- goto tx_q_free; ++ if (be_cmd_txq_create(adapter, q, cq, &tc_id)) ++ goto err; ++ ++ if (adapter->flags & BE_FLAGS_DCBX) ++ adapter->tc_txq_map[tc_id] = i; ++ } + return 0; + +-tx_q_free: +- be_queue_free(adapter, q); +-tx_cq_destroy: +- be_cmd_q_destroy(adapter, cq, QTYPE_CQ); +-tx_cq_free: +- be_queue_free(adapter, cq); +-tx_eq_destroy: +- be_cmd_q_destroy(adapter, eq, QTYPE_EQ); +-tx_eq_free: +- be_queue_free(adapter, eq); ++err: ++ be_tx_queues_destroy(adapter); + return -1; + } + + static void be_rx_queues_destroy(struct be_adapter *adapter) + { + struct be_queue_info *q; ++ struct be_rx_obj *rxo; ++ int i; + +- q = &adapter->rx_obj.q; +- if (q->created) { +- be_cmd_q_destroy(adapter, q, QTYPE_RXQ); +- be_rx_q_clean(adapter); +- } +- be_queue_free(adapter, q); ++ for_all_rx_queues(adapter, rxo, i) { ++ be_queue_free(adapter, &rxo->q); ++ ++ q = &rxo->cq; ++ if (q->created) ++ be_cmd_q_destroy(adapter, q, QTYPE_CQ); ++ be_queue_free(adapter, q); + +- q = &adapter->rx_obj.cq; +- if (q->created) +- be_cmd_q_destroy(adapter, q, QTYPE_CQ); +- be_queue_free(adapter, q); ++ q = &rxo->rx_eq.q; ++ if (q->created) ++ be_cmd_q_destroy(adapter, q, QTYPE_EQ); ++ be_queue_free(adapter, q); + +- /* Clear any residual events */ +- be_eq_clean(adapter, &adapter->rx_eq); ++ kfree(rxo->page_info_tbl); ++ } ++} + +- q = &adapter->rx_eq.q; +- if (q->created) +- be_cmd_q_destroy(adapter, q, QTYPE_EQ); +- be_queue_free(adapter, q); ++/* Is BE in a multi-channel mode */ ++static inline bool be_is_mc(struct be_adapter *adapter) { ++ return (adapter->function_mode & FLEX10_MODE || ++ adapter->function_mode & VNIC_MODE || ++ adapter->function_mode & UMC_ENABLED); ++} ++ ++static u32 be_num_rxqs_want(struct be_adapter *adapter) ++{ ++ if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) && ++ adapter->num_vfs == 0 && be_physfn(adapter) && ++ !be_is_mc(adapter)) { ++ return 1 + 
MAX_RSS_QS; /* one default non-RSS queue */ ++ } else { ++ dev_warn(&adapter->pdev->dev, ++ "No support for multiple RX queues\n"); ++ return 1; ++ } + } + + static int be_rx_queues_create(struct be_adapter *adapter) + { + struct be_queue_info *eq, *q, *cq; +- int rc; ++ struct be_rx_obj *rxo; ++ int rc, i; + ++ adapter->num_rx_qs = min(be_num_rxqs_want(adapter), ++ msix_enabled(adapter) ? ++ adapter->num_msix_vec - 1 : 1); ++ if (adapter->num_rx_qs != MAX_RX_QS) ++ dev_warn(&adapter->pdev->dev, ++ "Could create only %d receive queues", ++ adapter->num_rx_qs); ++ ++ adapter->max_rx_coal = gro ? BE_INIT_FRAGS_PER_FRAME : 1; + adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; +- adapter->rx_eq.max_eqd = BE_MAX_EQD; +- adapter->rx_eq.min_eqd = 0; +- adapter->rx_eq.cur_eqd = 0; +- adapter->rx_eq.enable_aic = true; ++ for_all_rx_queues(adapter, rxo, i) { ++ rxo->adapter = adapter; ++ rxo->rx_eq.max_eqd = BE_MAX_EQD; ++ rxo->rx_eq.enable_aic = true; + +- /* Alloc Rx Event queue */ +- eq = &adapter->rx_eq.q; +- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, +- sizeof(struct be_eq_entry)); +- if (rc) +- return rc; ++ /* EQ */ ++ eq = &rxo->rx_eq.q; ++ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, ++ sizeof(struct be_eq_entry)); ++ if (rc) ++ goto err; + +- /* Ask BE to create Rx Event queue */ +- rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd); +- if (rc) +- goto rx_eq_free; ++ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd); ++ if (rc) ++ goto err; + +- /* Alloc RX eth compl queue */ +- cq = &adapter->rx_obj.cq; +- rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, +- sizeof(struct be_eth_rx_compl)); +- if (rc) +- goto rx_eq_destroy; ++ rxo->rx_eq.eq_idx = adapter->eq_next_idx++; + +- /* Ask BE to create Rx eth compl queue */ +- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); +- if (rc) +- goto rx_cq_free; ++ /* CQ */ ++ cq = &rxo->cq; ++ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, ++ sizeof(struct be_eth_rx_compl)); ++ if (rc) ++ goto err; + +- /* Alloc RX eth queue */ +- q = &adapter->rx_obj.q; +- rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d)); +- if (rc) +- goto rx_cq_destroy; ++ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); ++ if (rc) ++ goto err; + +- /* Ask BE to create Rx eth queue */ +- rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size, +- BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false); +- if (rc) +- goto rx_q_free; ++ /* Rx Q - will be created in be_open() */ ++ q = &rxo->q; ++ rc = be_queue_alloc(adapter, q, RX_Q_LEN, ++ sizeof(struct be_eth_rx_d)); ++ if (rc) ++ goto err; ++ ++ rxo->page_info_tbl = kzalloc(sizeof(struct be_rx_page_info) * ++ RX_Q_LEN, GFP_KERNEL); ++ if (!rxo->page_info_tbl) ++ goto err; ++ } + + return 0; +-rx_q_free: +- be_queue_free(adapter, q); +-rx_cq_destroy: +- be_cmd_q_destroy(adapter, cq, QTYPE_CQ); +-rx_cq_free: +- be_queue_free(adapter, cq); +-rx_eq_destroy: +- be_cmd_q_destroy(adapter, eq, QTYPE_EQ); +-rx_eq_free: +- be_queue_free(adapter, eq); +- return rc; ++err: ++ be_rx_queues_destroy(adapter); ++ return -1; + } + +-/* There are 8 evt ids per func. 
Retruns the evt id's bit number */ +-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id) ++static bool event_peek(struct be_eq_obj *eq_obj) + { +- return eq_id - 8 * be_pci_func(adapter); ++ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q); ++ if (!eqe->evt) ++ return false; ++ else ++ return true; + } + + static irqreturn_t be_intx(int irq, void *dev) + { + struct be_adapter *adapter = dev; +- int isr; ++ struct be_rx_obj *rxo; ++ int isr, i, tx = 0 , rx = 0; + +- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + +- be_pci_func(adapter) * CEV_ISR_SIZE); +- if (!isr) +- return IRQ_NONE; ++ if (lancer_chip(adapter)) { ++ if (event_peek(&adapter->tx_eq)) ++ tx = event_handle(adapter, &adapter->tx_eq, false); ++ for_all_rx_queues(adapter, rxo, i) { ++ if (event_peek(&rxo->rx_eq)) ++ rx |= event_handle(adapter, &rxo->rx_eq, true); ++ } + +- event_handle(adapter, &adapter->tx_eq); +- event_handle(adapter, &adapter->rx_eq); ++ if (!(tx || rx)) ++ return IRQ_NONE; ++ } else { ++ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + ++ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE); ++ if (!isr) ++ return IRQ_NONE; ++ ++ if ((1 << adapter->tx_eq.eq_idx & isr)) ++ event_handle(adapter, &adapter->tx_eq, false); ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ if ((1 << rxo->rx_eq.eq_idx & isr)) ++ event_handle(adapter, &rxo->rx_eq, true); ++ } ++ } + + return IRQ_HANDLED; + } + + static irqreturn_t be_msix_rx(int irq, void *dev) + { +- struct be_adapter *adapter = dev; ++ struct be_rx_obj *rxo = dev; ++ struct be_adapter *adapter = rxo->adapter; + +- event_handle(adapter, &adapter->rx_eq); ++ event_handle(adapter, &rxo->rx_eq, true); + + return IRQ_HANDLED; + } +@@ -1353,48 +2046,72 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev) + { + struct be_adapter *adapter = dev; + +- event_handle(adapter, &adapter->tx_eq); ++ event_handle(adapter, &adapter->tx_eq, false); + + return IRQ_HANDLED; + } + + static inline bool do_gro(struct be_adapter *adapter, +- struct be_eth_rx_compl *rxcp) ++ struct be_rx_compl_info *rxcp) + { +- int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); +- int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); +- +- if (err) +- drvr_stats(adapter)->be_rxcp_err++; +- +- return (tcp_frame && !err) ? true : false; ++ return (!rxcp->tcpf || rxcp->err || adapter->max_rx_coal <= 1 || ++ (rxcp->vlanf && !vlan_configured(adapter))) ? 
++ false : true; + } + + int be_poll_rx(struct napi_struct *napi, int budget) + { + struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi); +- struct be_adapter *adapter = +- container_of(rx_eq, struct be_adapter, rx_eq); +- struct be_queue_info *rx_cq = &adapter->rx_obj.cq; +- struct be_eth_rx_compl *rxcp; ++ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq); ++ struct be_adapter *adapter = rxo->adapter; ++ struct be_queue_info *rx_cq = &rxo->cq; ++ struct be_rx_compl_info *rxcp; + u32 work_done; ++ bool flush_lro = false; + ++ rxo->stats.rx_polls++; + for (work_done = 0; work_done < budget; work_done++) { +- rxcp = be_rx_compl_get(adapter); ++ rxcp = be_rx_compl_get(rxo); + if (!rxcp) + break; + +- if (do_gro(adapter, rxcp)) +- be_rx_compl_process_gro(adapter, rxcp); +- else +- be_rx_compl_process(adapter, rxcp); ++ /* Is it a flush compl that has no data */ ++ if (unlikely(rxcp->num_rcvd == 0)) ++ continue; + +- be_rx_compl_reset(rxcp); ++ if (unlikely(rxcp->port != adapter->port_num)) { ++ be_rx_compl_discard(adapter, rxo, rxcp); ++ be_rx_stats_update(rxo, rxcp); ++ continue; ++ } ++ ++ if (likely((lancer_A0_chip(adapter) && !rxcp->err) || ++ !lancer_A0_chip(adapter))) { ++ if (do_gro(adapter, rxcp)) { ++ if (adapter->gro_supported) { ++ be_rx_compl_process_gro(adapter, rxo, ++ rxcp); ++ } else { ++ be_rx_compl_process_lro(adapter, rxo, ++ rxcp); ++ flush_lro = true; ++ } ++ } else { ++ be_rx_compl_process(adapter, rxo, rxcp); ++ } ++ } else if (lancer_A0_chip(adapter) && rxcp->err) { ++ be_rx_compl_discard(adapter, rxo, rxcp); ++ } ++ ++ be_rx_stats_update(rxo, rxcp); + } + ++ if (flush_lro) ++ lro_flush_all(&rxo->lro_mgr); ++ + /* Refill the queue */ +- if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM) +- be_post_rx_frags(adapter); ++ if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) ++ be_post_rx_frags(rxo); + + /* All consumed */ + if (work_done < budget) { +@@ -1404,40 +2121,13 @@ int be_poll_rx(struct napi_struct *napi, int budget) + /* More to be consumed; continue with interrupts disabled */ + be_cq_notify(adapter, rx_cq->id, false, work_done); + } ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) ++ adapter->netdev->last_rx = jiffies; ++#endif + return work_done; + } + +-void be_process_tx(struct be_adapter *adapter) +-{ +- struct be_queue_info *txq = &adapter->tx_obj.q; +- struct be_queue_info *tx_cq = &adapter->tx_obj.cq; +- struct be_eth_tx_compl *txcp; +- u32 num_cmpl = 0; +- u16 end_idx; +- +- while ((txcp = be_tx_compl_get(tx_cq))) { +- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, +- wrb_index, txcp); +- be_tx_compl_process(adapter, end_idx); +- num_cmpl++; +- } +- +- if (num_cmpl) { +- be_cq_notify(adapter, tx_cq->id, true, num_cmpl); +- +- /* As Tx wrbs have been freed up, wake up netdev queue if +- * it was stopped due to lack of tx wrbs. +- */ +- if (netif_queue_stopped(adapter->netdev) && +- atomic_read(&txq->used) < txq->len / 2) { +- netif_wake_queue(adapter->netdev); +- } +- +- drvr_stats(adapter)->be_tx_events++; +- drvr_stats(adapter)->be_tx_compl += num_cmpl; +- } +-} +- + /* As TX and MCC share the same EQ check for both TX and MCC completions. 
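+ * A single event entry only records that this EQ fired, not which CQ
+ * has work, so the poll routine must drain every completion queue
+ * attached to the EQ; servicing only one of them could leave the
+ * other unserviced until the next interrupt.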
+ * For TX/MCC we don't honour budget; consume everything + */ +@@ -1446,96 +2136,264 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) + struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); + struct be_adapter *adapter = + container_of(tx_eq, struct be_adapter, tx_eq); ++ struct be_tx_obj *txo; ++ struct be_eth_tx_compl *txcp; ++ int tx_compl, mcc_compl, status = 0; ++ u8 i; ++ u16 num_wrbs; ++ ++ for_all_tx_queues(adapter, txo, i) { ++ tx_compl = 0; ++ num_wrbs = 0; ++ while ((txcp = be_tx_compl_get(&txo->cq))) { ++ num_wrbs += be_tx_compl_process(adapter, txo, ++ AMAP_GET_BITS(struct amap_eth_tx_compl, ++ wrb_index, txcp)); ++ tx_compl++; ++ } ++ if (tx_compl) { ++ be_cq_notify(adapter, txo->cq.id, true, tx_compl); ++ ++ atomic_sub(num_wrbs, &txo->q.used); ++ ++ /* As Tx wrbs have been freed up, wake up netdev queue ++ * if it was stopped due to lack of tx wrbs. */ ++ if (__netif_subqueue_stopped(adapter->netdev, i) && ++ atomic_read(&txo->q.used) < txo->q.len / 2) { ++ netif_wake_subqueue(adapter->netdev, i); ++ } ++ ++ adapter->drv_stats.be_tx_events++; ++ txo->stats.be_tx_compl += tx_compl; ++ } ++ } ++ ++ mcc_compl = be_process_mcc(adapter, &status); ++ ++ if (mcc_compl) { ++ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; ++ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl); ++ } + + napi_complete(napi); + +- be_process_tx(adapter); +- +- be_process_mcc(adapter); +- ++ be_eq_notify(adapter, tx_eq->q.id, true, false, 0); + return 1; + } + ++void be_detect_dump_ue(struct be_adapter *adapter) ++{ ++ u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; ++ u32 i; ++ ++ pci_read_config_dword(adapter->pdev, ++ PCICFG_UE_STATUS_LOW, &ue_status_lo); ++ pci_read_config_dword(adapter->pdev, ++ PCICFG_UE_STATUS_HIGH, &ue_status_hi); ++ pci_read_config_dword(adapter->pdev, ++ PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask); ++ pci_read_config_dword(adapter->pdev, ++ PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask); ++ ++ ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); ++ ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); ++ ++ if (ue_status_lo || ue_status_hi) { ++ adapter->ue_detected = true; ++ adapter->eeh_err = true; ++ dev_err(&adapter->pdev->dev, "UE Detected!!\n"); ++ } ++ ++ if (ue_status_lo) { ++ for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { ++ if (ue_status_lo & 1) ++ dev_err(&adapter->pdev->dev, ++ "UE: %s bit set\n", ue_status_low_desc[i]); ++ } ++ } ++ if (ue_status_hi) { ++ for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) { ++ if (ue_status_hi & 1) ++ dev_err(&adapter->pdev->dev, ++ "UE: %s bit set\n", ue_status_hi_desc[i]); ++ } ++ } ++ ++} ++ + static void be_worker(struct work_struct *work) + { + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); ++ struct be_rx_obj *rxo; ++ struct be_tx_obj *txo; ++ int i; + +- be_cmd_get_stats(adapter, &adapter->stats.cmd); ++ if (!adapter->ue_detected && !lancer_chip(adapter)) ++ be_detect_dump_ue(adapter); + +- /* Set EQ delay */ +- be_rx_eqd_update(adapter); ++ /* when interrupts are not yet enabled, just reap any pending ++ * mcc completions */ ++ if (!netif_running(adapter->netdev)) { ++ int mcc_compl, status = 0; + +- be_tx_rate_update(adapter); +- be_rx_rate_update(adapter); ++ mcc_compl = be_process_mcc(adapter, &status); + +- if (adapter->rx_post_starved) { +- adapter->rx_post_starved = false; +- be_post_rx_frags(adapter); ++ if (mcc_compl) { ++ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; ++ be_cq_notify(adapter, mcc_obj->cq.id, 
false, mcc_compl); ++ } ++ ++ goto reschedule; ++ } ++ ++ if (!adapter->stats_cmd_sent) ++ be_cmd_get_stats(adapter, &adapter->stats_cmd); ++ ++ for_all_tx_queues(adapter, txo, i) ++ be_tx_rate_update(txo); ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ be_rx_rate_update(rxo); ++ be_rx_eqd_update(adapter, rxo); ++ ++ if (rxo->rx_post_starved) { ++ rxo->rx_post_starved = false; ++ be_post_rx_frags(rxo); ++ } + } + ++reschedule: ++ adapter->work_counter++; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); + } + ++static void be_msix_disable(struct be_adapter *adapter) ++{ ++ if (msix_enabled(adapter)) { ++ pci_disable_msix(adapter->pdev); ++ adapter->num_msix_vec = 0; ++ } ++} ++ + static void be_msix_enable(struct be_adapter *adapter) + { +- int i, status; ++#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */ ++ int i, status, num_vec; + +- for (i = 0; i < BE_NUM_MSIX_VECTORS; i++) ++ num_vec = be_num_rxqs_want(adapter) + 1; ++ ++ for (i = 0; i < num_vec; i++) + adapter->msix_entries[i].entry = i; + +- status = pci_enable_msix(adapter->pdev, adapter->msix_entries, +- BE_NUM_MSIX_VECTORS); +- if (status == 0) +- adapter->msix_enabled = true; ++ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); ++ if (status == 0) { ++ goto done; ++ } else if (status >= BE_MIN_MSIX_VECTORS) { ++ num_vec = status; ++ if (pci_enable_msix(adapter->pdev, adapter->msix_entries, ++ num_vec) == 0) ++ goto done; ++ } + return; ++done: ++ adapter->num_msix_vec = num_vec; ++ return; ++} ++ ++static void be_sriov_enable(struct be_adapter *adapter) ++{ ++ be_check_sriov_fn_type(adapter); ++#ifdef CONFIG_PCI_IOV ++ if (be_physfn(adapter) && num_vfs) { ++ int status, pos; ++ u16 nvfs; ++ ++ pos = pci_find_ext_capability(adapter->pdev, ++ PCI_EXT_CAP_ID_SRIOV); ++ pci_read_config_word(adapter->pdev, ++ pos + PCI_SRIOV_TOTAL_VF, &nvfs); ++ adapter->num_vfs = num_vfs; ++ if (num_vfs > nvfs) { ++ dev_info(&adapter->pdev->dev, ++ "Device supports %d VFs and not %d\n", ++ nvfs, num_vfs); ++ adapter->num_vfs = nvfs; ++ } ++ ++ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); ++ if (status) ++ adapter->num_vfs = 0; ++ } ++#endif ++} ++ ++static void be_sriov_disable(struct be_adapter *adapter) ++{ ++#ifdef CONFIG_PCI_IOV ++ if (adapter->num_vfs > 0) { ++ pci_disable_sriov(adapter->pdev); ++ adapter->num_vfs = 0; ++ } ++#endif + } + +-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) ++static inline int be_msix_vec_get(struct be_adapter *adapter, ++ struct be_eq_obj *eq_obj) + { +- return adapter->msix_entries[ +- be_evt_bit_get(adapter, eq_id)].vector; ++ return adapter->msix_entries[eq_obj->eq_idx].vector; + } + + static int be_request_irq(struct be_adapter *adapter, + struct be_eq_obj *eq_obj, +- void *handler, char *desc) ++ void *handler, char *desc, void *context) + { + struct net_device *netdev = adapter->netdev; + int vec; + + sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); +- vec = be_msix_vec_get(adapter, eq_obj->q.id); +- return request_irq(vec, handler, 0, eq_obj->desc, adapter); ++ vec = be_msix_vec_get(adapter, eq_obj); ++ return request_irq(vec, handler, 0, eq_obj->desc, context); + } + +-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj) ++static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj, ++ void *context) + { +- int vec = be_msix_vec_get(adapter, eq_obj->q.id); +- free_irq(vec, adapter); ++ int vec = be_msix_vec_get(adapter, eq_obj); ++ free_irq(vec, context); + } + + static int 
be_msix_register(struct be_adapter *adapter) + { +- int status; ++ struct be_rx_obj *rxo; ++ int status, i; ++ char qname[10]; + +- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx"); ++ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx", ++ adapter); + if (status) + goto err; + +- status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx"); +- if (status) +- goto free_tx_irq; ++ for_all_rx_queues(adapter, rxo, i) { ++ sprintf(qname, "rxq%d", i); ++ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx, ++ qname, rxo); ++ if (status) ++ goto err_msix; ++ } + + return 0; + +-free_tx_irq: +- be_free_irq(adapter, &adapter->tx_eq); ++err_msix: ++ be_free_irq(adapter, &adapter->tx_eq, adapter); ++ ++ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--) ++ be_free_irq(adapter, &rxo->rx_eq, rxo); ++ + err: + dev_warn(&adapter->pdev->dev, + "MSIX Request IRQ failed - err %d\n", status); +- pci_disable_msix(adapter->pdev); +- adapter->msix_enabled = false; ++ be_msix_disable(adapter); + return status; + } + +@@ -1544,10 +2402,13 @@ static int be_irq_register(struct be_adapter *adapter) + struct net_device *netdev = adapter->netdev; + int status; + +- if (adapter->msix_enabled) { ++ if (msix_enabled(adapter)) { + status = be_msix_register(adapter); + if (status == 0) + goto done; ++ /* INTx is not supported for VF */ ++ if (!be_physfn(adapter)) ++ return status; + } + + /* INTx */ +@@ -1567,87 +2428,363 @@ done: + static void be_irq_unregister(struct be_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; ++ struct be_rx_obj *rxo; ++ int i; + + if (!adapter->isr_registered) + return; + + /* INTx */ +- if (!adapter->msix_enabled) { ++ if (!msix_enabled(adapter)) { + free_irq(netdev->irq, adapter); + goto done; + } + + /* MSIx */ +- be_free_irq(adapter, &adapter->tx_eq); +- be_free_irq(adapter, &adapter->rx_eq); ++ be_free_irq(adapter, &adapter->tx_eq, adapter); ++ ++ for_all_rx_queues(adapter, rxo, i) ++ be_free_irq(adapter, &rxo->rx_eq, rxo); ++ + done: + adapter->isr_registered = false; +- return; + } + +-static int be_open(struct net_device *netdev) ++static u16 be_select_queue(struct net_device *netdev, ++ struct sk_buff *skb) + { + struct be_adapter *adapter = netdev_priv(netdev); +- struct be_eq_obj *rx_eq = &adapter->rx_eq; ++ u8 prio; ++ ++ if (adapter->num_tx_qs == 1) ++ return 0; ++ ++ prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; ++ return adapter->tc_txq_map[adapter->prio_tc_map[prio]]; ++} ++ ++static void be_rx_queues_clear(struct be_adapter *adapter) ++{ ++ struct be_queue_info *q; ++ struct be_rx_obj *rxo; ++ int i; ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ q = &rxo->q; ++ if (q->created) { ++ be_cmd_rxq_destroy(adapter, q); ++ /* After the rxq is invalidated, wait for a grace time ++ * of 1ms for all dma to end and the flush compl to ++ * arrive ++ */ ++ mdelay(1); ++ be_rx_q_clean(adapter, rxo); ++ } ++ ++ /* Clear any residual events */ ++ q = &rxo->rx_eq.q; ++ if (q->created) ++ be_eq_clean(adapter, &rxo->rx_eq); ++ } ++} ++ ++static int be_close(struct net_device *netdev) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct be_rx_obj *rxo; ++ struct be_tx_obj *txo; + struct be_eq_obj *tx_eq = &adapter->tx_eq; +- bool link_up; +- int status; ++ int vec, i; ++ ++ be_async_mcc_disable(adapter); ++ ++ netif_stop_queue(netdev); ++ netif_carrier_off(netdev); ++ adapter->link_status = LINK_DOWN; ++ ++ if (!lancer_chip(adapter)) ++ be_intr_set(adapter, false); ++ ++ 
for_all_rx_queues(adapter, rxo, i) ++ napi_disable(&rxo->rx_eq.napi); ++ ++ napi_disable(&tx_eq->napi); ++ ++ if (lancer_chip(adapter)) { ++ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); ++ for_all_rx_queues(adapter, rxo, i) ++ be_cq_notify(adapter, rxo->cq.id, false, 0); ++ for_all_tx_queues(adapter, txo, i) ++ be_cq_notify(adapter, txo->cq.id, false, 0); ++ } ++ ++ if (msix_enabled(adapter)) { ++ vec = be_msix_vec_get(adapter, tx_eq); ++ synchronize_irq(vec); ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ vec = be_msix_vec_get(adapter, &rxo->rx_eq); ++ synchronize_irq(vec); ++ } ++ } else { ++ synchronize_irq(netdev->irq); ++ } ++ be_irq_unregister(adapter); ++ ++ /* Wait for all pending tx completions to arrive so that ++ * all tx skbs are freed. ++ */ ++ for_all_tx_queues(adapter, txo, i) ++ be_tx_compl_clean(adapter, txo); ++ ++ be_rx_queues_clear(adapter); ++ return 0; ++} ++ ++static int be_rx_queues_setup(struct be_adapter *adapter) ++{ ++ struct be_rx_obj *rxo; ++ int rc, i; ++ u8 rsstable[MAX_RSS_QS]; ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, ++ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE, ++ adapter->if_handle, ++ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id); ++ if (rc) ++ return rc; ++ } ++ ++ if (be_multi_rxq(adapter)) { ++ for_all_rss_queues(adapter, rxo, i) ++ rsstable[i] = rxo->rss_id; ++ ++ rc = be_cmd_rss_config(adapter, rsstable, ++ adapter->num_rx_qs - 1); ++ if (rc) ++ return rc; ++ } + + /* First time posting */ +- be_post_rx_frags(adapter); ++ for_all_rx_queues(adapter, rxo, i) { ++ be_post_rx_frags(rxo); ++ napi_enable(&rxo->rx_eq.napi); ++ } ++ return 0; ++} ++ ++static int be_open(struct net_device *netdev) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct be_eq_obj *tx_eq = &adapter->tx_eq; ++ struct be_rx_obj *rxo; ++ int link_status; ++ int status, i; ++ u8 mac_speed; ++ u16 link_speed; ++ ++ status = be_rx_queues_setup(adapter); ++ if (status) ++ goto err; + +- napi_enable(&rx_eq->napi); + napi_enable(&tx_eq->napi); + + be_irq_register(adapter); + +- be_intr_set(adapter, true); ++ if (!lancer_chip(adapter)) ++ be_intr_set(adapter, true); + + /* The evt queues are created in unarmed state; arm them */ +- be_eq_notify(adapter, rx_eq->q.id, true, false, 0); ++ for_all_rx_queues(adapter, rxo, i) { ++ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0); ++ be_cq_notify(adapter, rxo->cq.id, true, 0); ++ } + be_eq_notify(adapter, tx_eq->q.id, true, false, 0); + +- /* Rx compl queue may be in unarmed state; rearm it */ +- be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0); ++ /* Now that interrupts are on we can process async mcc */ ++ be_async_mcc_enable(adapter); + +- status = be_cmd_link_status_query(adapter, &link_up); ++ status = be_cmd_link_status_query(adapter, &link_status, &mac_speed, ++ &link_speed, 0); + if (status) +- goto ret_sts; +- be_link_status_update(adapter, link_up); ++ goto err; ++ be_link_status_update(adapter, link_status); + +- status = be_vid_config(adapter); ++ status = be_vid_config(adapter, false, 0); + if (status) +- goto ret_sts; ++ goto err; + +- status = be_cmd_set_flow_control(adapter, +- adapter->tx_fc, adapter->rx_fc); +- if (status) +- goto ret_sts; ++ if (be_physfn(adapter)) { ++ status = be_cmd_set_flow_control(adapter, ++ adapter->tx_fc, adapter->rx_fc); ++ if (status) ++ goto err; ++ } ++ ++ return 0; ++err: ++ be_close(adapter->netdev); ++ return -EIO; ++} ++ ++static int be_setup_wol(struct be_adapter *adapter, bool enable) ++{ ++ struct 
be_dma_mem cmd; ++ int status = 0; ++ u8 mac[ETH_ALEN]; ++ ++ memset(mac, 0, ETH_ALEN); ++ ++ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); ++ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); ++ if (cmd.va == NULL) ++ return -1; ++ memset(cmd.va, 0, cmd.size); ++ ++ if (enable) { ++ status = pci_write_config_dword(adapter->pdev, ++ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); ++ if (status) { ++ dev_err(&adapter->pdev->dev, ++ "Could not enable Wake-on-lan\n"); ++ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, ++ cmd.dma); ++ return status; ++ } ++ status = be_cmd_enable_magic_wol(adapter, ++ adapter->netdev->dev_addr, &cmd); ++ pci_enable_wake(adapter->pdev, PCI_D3hot, 1); ++ pci_enable_wake(adapter->pdev, PCI_D3cold, 1); ++ } else { ++ status = be_cmd_enable_magic_wol(adapter, mac, &cmd); ++ pci_enable_wake(adapter->pdev, PCI_D3hot, 0); ++ pci_enable_wake(adapter->pdev, PCI_D3cold, 0); ++ } ++ ++ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); ++ return status; ++} ++ ++/* ++ * Generate a seed MAC address from the PF MAC Address using jhash. ++ * MAC Address for VFs are assigned incrementally starting from the seed. ++ * These addresses are programmed in the ASIC by the PF and the VF driver ++ * queries for the MAC address during its probe. ++ */ ++static inline int be_vf_eth_addr_config(struct be_adapter *adapter) ++{ ++ u32 vf = 0; ++ int status = 0; ++ u8 mac[ETH_ALEN]; ++ ++ be_vf_eth_addr_generate(adapter, mac); ++ ++ for (vf = 0; vf < adapter->num_vfs; vf++) { ++ status = be_cmd_pmac_add(adapter, mac, ++ adapter->vf_cfg[vf].vf_if_handle, ++ &adapter->vf_cfg[vf].vf_pmac_id, ++ vf + 1); ++ if (status) ++ dev_err(&adapter->pdev->dev, ++ "Mac address add failed for VF %d\n", vf); ++ else ++ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN); + +- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); +-ret_sts: ++ mac[5] += 1; ++ } + return status; + } + ++static inline void be_vf_eth_addr_rem(struct be_adapter *adapter) ++{ ++ u32 vf; ++ ++ for (vf = 0; vf < adapter->num_vfs; vf++) { ++ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) ++ be_cmd_pmac_del(adapter, ++ adapter->vf_cfg[vf].vf_if_handle, ++ adapter->vf_cfg[vf].vf_pmac_id, vf + 1); ++ } ++} ++ ++static int be_num_txqs_want(struct be_adapter *adapter) ++{ ++ if (adapter->num_vfs > 0 || be_is_mc(adapter) || ++ lancer_chip(adapter) || !be_physfn(adapter) || ++ adapter->generation == BE_GEN2) ++ return 1; ++ else ++ return MAX_TX_QS; ++} ++ + static int be_setup(struct be_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; +- u32 cap_flags, en_flags; +- int status; +- +- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | +- BE_IF_FLAGS_MCAST_PROMISCUOUS | +- BE_IF_FLAGS_PROMISCUOUS | +- BE_IF_FLAGS_PASS_L3L4_ERRORS; +- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | +- BE_IF_FLAGS_PASS_L3L4_ERRORS; ++ int status, fw_num_txqs, num_txqs; ++ u32 cap_flags, en_flags, vf = 0; ++ u8 mac[ETH_ALEN]; ++ ++ num_txqs = be_num_txqs_want(adapter); ++ if (num_txqs > 1) { ++ be_cmd_req_pg_pfc(adapter, &fw_num_txqs); ++ num_txqs = min(num_txqs, fw_num_txqs); ++ } ++ adapter->num_tx_qs = num_txqs; ++ if (adapter->num_tx_qs != MAX_TX_QS) ++ netif_set_real_num_tx_queues(adapter->netdev, ++ adapter->num_tx_qs); ++ ++ be_cmd_req_native_mode(adapter); ++ ++ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | ++ BE_IF_FLAGS_BROADCAST | ++ BE_IF_FLAGS_MULTICAST; ++ ++ if (be_physfn(adapter)) { ++ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) 
{ ++ cap_flags |= BE_IF_FLAGS_RSS; ++ en_flags |= BE_IF_FLAGS_RSS; ++ } ++ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS | ++ BE_IF_FLAGS_PROMISCUOUS; ++ if (!lancer_A0_chip(adapter)) { ++ cap_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; ++ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; ++ } ++ } + + status = be_cmd_if_create(adapter, cap_flags, en_flags, + netdev->dev_addr, false/* pmac_invalid */, +- &adapter->if_handle, &adapter->pmac_id); ++ &adapter->if_handle, &adapter->pmac_id, 0); + if (status != 0) + goto do_none; + ++ if (be_physfn(adapter)) { ++ while (vf < adapter->num_vfs) { ++ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | ++ BE_IF_FLAGS_BROADCAST; ++ status = be_cmd_if_create(adapter, cap_flags, ++ en_flags, mac, true, ++ &adapter->vf_cfg[vf].vf_if_handle, ++ NULL, vf+1); ++ if (status) { ++ dev_err(&adapter->pdev->dev, ++ "Interface Create failed for VF %d\n", vf); ++ goto if_destroy; ++ } ++ adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID; ++ vf++; ++ } ++ } else { ++ status = be_cmd_mac_addr_query(adapter, mac, ++ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); ++ if (!status) { ++ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); ++ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); ++ } ++ } ++ + status = be_tx_queues_create(adapter); + if (status != 0) + goto if_destroy; +@@ -1656,10 +2793,15 @@ static int be_setup(struct be_adapter *adapter) + if (status != 0) + goto tx_qs_destroy; + ++ /* Allow all priorities by default. A GRP5 evt may modify this */ ++ adapter->vlan_prio_bmap = 0xff; ++ + status = be_mcc_queues_create(adapter); + if (status != 0) + goto rx_qs_destroy; + ++ adapter->link_speed = -1; ++ + return 0; + + rx_qs_destroy: +@@ -1667,158 +2809,392 @@ rx_qs_destroy: + tx_qs_destroy: + be_tx_queues_destroy(adapter); + if_destroy: +- be_cmd_if_destroy(adapter, adapter->if_handle); ++ if (be_physfn(adapter)) { ++ for (vf = 0; vf < adapter->num_vfs; vf++) ++ if (adapter->vf_cfg[vf].vf_if_handle) ++ be_cmd_if_destroy(adapter, ++ adapter->vf_cfg[vf].vf_if_handle, ++ vf + 1); ++ } ++ be_cmd_if_destroy(adapter, adapter->if_handle, 0); + do_none: + return status; + } + + static int be_clear(struct be_adapter *adapter) + { ++ int vf; ++ ++ if (be_physfn(adapter) && adapter->num_vfs) ++ be_vf_eth_addr_rem(adapter); ++ + be_mcc_queues_destroy(adapter); + be_rx_queues_destroy(adapter); + be_tx_queues_destroy(adapter); ++ adapter->eq_next_idx = 0; + +- be_cmd_if_destroy(adapter, adapter->if_handle); ++ if (be_physfn(adapter)) { ++ for (vf = 0; vf < adapter->num_vfs; vf++) ++ if (adapter->vf_cfg[vf].vf_if_handle) ++ be_cmd_if_destroy(adapter, ++ adapter->vf_cfg[vf].vf_if_handle, vf + 1); ++ } ++ be_cmd_if_destroy(adapter, adapter->if_handle, 0); + ++ /* tell fw we're done with firing cmds */ ++ be_cmd_fw_clean(adapter); + return 0; + } + +-static int be_close(struct net_device *netdev) ++static void be_cpy_drv_ver(struct be_adapter *adapter, void *va) ++{ ++ struct mgmt_controller_attrib *attrib = ++ (struct mgmt_controller_attrib *) ((u8*) va + ++ sizeof(struct be_cmd_resp_hdr)); ++ ++ memcpy(attrib->hba_attribs.driver_version_string, ++ DRV_VER, sizeof(DRV_VER)); ++ attrib->pci_bus_number = adapter->pdev->bus->number; ++ attrib->pci_device_number = PCI_SLOT(adapter->pdev->devfn); ++ return; ++} ++ ++#define IOCTL_COOKIE "SERVERENGINES CORP" ++static int be_do_ioctl(struct net_device *netdev, ++ struct ifreq *ifr, int cmd) + { + struct be_adapter *adapter = netdev_priv(netdev); +- struct be_eq_obj *rx_eq = &adapter->rx_eq; +- struct be_eq_obj *tx_eq = &adapter->tx_eq; +- int 
vec; ++ struct be_cmd_req_hdr req; ++ struct be_cmd_resp_hdr *resp; ++ void *data = ifr->ifr_data; ++ void *ioctl_ptr; ++ void *va; ++ dma_addr_t dma; ++ u32 req_size; ++ int status, ret = 0; ++ u8 cookie[32]; ++ ++ switch (cmd) { ++ case SIOCDEVPRIVATE: ++ if (copy_from_user(cookie, data, strlen(IOCTL_COOKIE))) ++ return -EFAULT; ++ ++ if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE))) ++ return -EINVAL; + +- cancel_delayed_work_sync(&adapter->work); ++ ioctl_ptr = (u8 *)data + strlen(IOCTL_COOKIE); ++ if (copy_from_user(&req, ioctl_ptr, ++ sizeof(struct be_cmd_req_hdr))) ++ return -EFAULT; + +- netif_stop_queue(netdev); +- netif_carrier_off(netdev); +- adapter->link_up = false; ++ req_size = le32_to_cpu(req.request_length); ++ if (req_size > 65536) ++ return -EINVAL; + +- be_intr_set(adapter, false); ++ req_size += sizeof(struct be_cmd_req_hdr); ++ va = pci_alloc_consistent(adapter->pdev, req_size, &dma); ++ if (!va) ++ return -ENOMEM; ++ if (copy_from_user(va, ioctl_ptr, req_size)) { ++ ret = -EFAULT; ++ break; ++ } + +- if (adapter->msix_enabled) { +- vec = be_msix_vec_get(adapter, tx_eq->q.id); +- synchronize_irq(vec); +- vec = be_msix_vec_get(adapter, rx_eq->q.id); +- synchronize_irq(vec); +- } else { +- synchronize_irq(netdev->irq); ++ status = be_cmd_pass_ext_ioctl(adapter, dma, req_size, va); ++ if (status == -1) { ++ ret = -EIO; ++ break; ++ } ++ ++ resp = (struct be_cmd_resp_hdr *) va; ++ if (!status) { ++ if (req.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) ++ be_cpy_drv_ver(adapter, va); ++ } ++ ++ if (copy_to_user(ioctl_ptr, va, req_size)) { ++ ret = -EFAULT; ++ break; ++ } ++ break; ++ default: ++ return -EOPNOTSUPP; + } +- be_irq_unregister(adapter); + +- napi_disable(&rx_eq->napi); +- napi_disable(&tx_eq->napi); ++ if (va) ++ pci_free_consistent(adapter->pdev, req_size, va, dma); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_NET_POLL_CONTROLLER ++static void be_netpoll(struct net_device *netdev) ++{ ++ struct be_adapter *adapter = netdev_priv(netdev); ++ struct be_rx_obj *rxo; ++ int i; + +- /* Wait for all pending tx completions to arrive so that +- * all tx skbs are freed. +- */ +- be_tx_compl_clean(adapter); ++ event_handle(adapter, &adapter->tx_eq, false); ++ for_all_rx_queues(adapter, rxo, i) ++ event_handle(adapter, &rxo->rx_eq, true); ++ ++ return; ++} ++#endif ++ ++static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, ++ void **ip_hdr, void **tcpudp_hdr, ++ u64 *hdr_flags, void *priv) ++{ ++ struct ethhdr *eh; ++ struct vlan_ethhdr *veh; ++ struct iphdr *iph; ++ u8 *va = page_address(frag->page) + frag->page_offset; ++ unsigned long ll_hlen; ++ ++ prefetch(va); ++ eh = (struct ethhdr *)va; ++ *mac_hdr = eh; ++ ll_hlen = ETH_HLEN; ++ if (eh->h_proto != htons(ETH_P_IP)) { ++ if (eh->h_proto == htons(ETH_P_8021Q)) { ++ veh = (struct vlan_ethhdr *)va; ++ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) ++ return -1; ++ ++ ll_hlen += VLAN_HLEN; ++ } else { ++ return -1; ++ } ++ } ++ *hdr_flags = LRO_IPV4; ++ iph = (struct iphdr *)(va + ll_hlen); ++ *ip_hdr = iph; ++ if (iph->protocol != IPPROTO_TCP) ++ return -1; ++ *hdr_flags |= LRO_TCP; ++ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); + + return 0; + } + +-#define FW_FILE_HDR_SIGN "ServerEngines Corp. 
" ++static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev) ++{ ++ struct net_lro_mgr *lro_mgr; ++ struct be_rx_obj *rxo; ++ int i; ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ lro_mgr = &rxo->lro_mgr; ++ lro_mgr->dev = netdev; ++ lro_mgr->features = LRO_F_NAPI; ++ lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; ++ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; ++ lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS; ++ lro_mgr->lro_arr = rxo->lro_desc; ++ lro_mgr->get_frag_header = be_get_frag_header; ++ lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME; ++ } ++ ++#ifdef NETIF_F_GRO ++ netdev->features |= NETIF_F_GRO; ++ adapter->gro_supported = true; ++#endif ++} ++ ++#define FW_FILE_HDR_SIGN "ServerEngines Corp. " + char flash_cookie[2][16] = {"*** SE FLAS", + "H DIRECTORY *** "}; +-static int be_flash_image(struct be_adapter *adapter, ++ ++static bool be_flash_redboot(struct be_adapter *adapter, ++ const u8 *p, u32 img_start, int image_size, ++ int hdr_size) ++{ ++ u32 crc_offset; ++ u8 flashed_crc[4]; ++ int status; ++ ++ crc_offset = hdr_size + img_start + image_size - 4; ++ ++ p += crc_offset; ++ ++ status = be_cmd_get_flash_crc(adapter, flashed_crc, ++ (image_size - 4)); ++ if (status) { ++ dev_err(&adapter->pdev->dev, ++ "could not get crc from flash, not flashing redboot\n"); ++ return false; ++ } ++ ++ /*update redboot only if crc does not match*/ ++ if (!memcmp(flashed_crc, p, 4)) ++ return false; ++ else ++ return true; ++} ++ ++static bool phy_flashing_required(struct be_adapter *adapter) ++{ ++ int status = 0; ++ struct be_phy_info phy_info; ++ ++ status = be_cmd_get_phy_info(adapter, &phy_info); ++ if (status) ++ return false; ++ if ((phy_info.phy_type == TN_8022) && ++ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) { ++ return true; ++ } ++ return false; ++} ++ ++static int be_flash_data(struct be_adapter *adapter, + const struct firmware *fw, +- struct be_dma_mem *flash_cmd, u32 flash_type) ++ struct be_dma_mem *flash_cmd, int num_of_images) ++ + { +- int status; +- u32 flash_op, image_offset = 0, total_bytes, image_size = 0; ++ int status = 0, i, filehdr_size = 0; ++ u32 total_bytes = 0, flash_op; + int num_bytes; + const u8 *p = fw->data; + struct be_cmd_write_flashrom *req = flash_cmd->va; ++ struct flash_comp *pflashcomp; ++ int num_comp; + +- switch (flash_type) { +- case FLASHROM_TYPE_ISCSI_ACTIVE: +- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START; +- image_size = FLASH_IMAGE_MAX_SIZE; +- break; +- case FLASHROM_TYPE_ISCSI_BACKUP: +- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START; +- image_size = FLASH_IMAGE_MAX_SIZE; +- break; +- case FLASHROM_TYPE_FCOE_FW_ACTIVE: +- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START; +- image_size = FLASH_IMAGE_MAX_SIZE; +- break; +- case FLASHROM_TYPE_FCOE_FW_BACKUP: +- image_offset = FLASH_FCoE_BACKUP_IMAGE_START; +- image_size = FLASH_IMAGE_MAX_SIZE; +- break; +- case FLASHROM_TYPE_BIOS: +- image_offset = FLASH_iSCSI_BIOS_START; +- image_size = FLASH_BIOS_IMAGE_MAX_SIZE; +- break; +- case FLASHROM_TYPE_FCOE_BIOS: +- image_offset = FLASH_FCoE_BIOS_START; +- image_size = FLASH_BIOS_IMAGE_MAX_SIZE; +- break; +- case FLASHROM_TYPE_PXE_BIOS: +- image_offset = FLASH_PXE_BIOS_START; +- image_size = FLASH_BIOS_IMAGE_MAX_SIZE; +- break; +- default: +- return 0; ++ struct flash_comp gen3_flash_types[10] = { ++ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE, ++ FLASH_IMAGE_MAX_SIZE_g3}, ++ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT, ++ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3}, ++ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS, ++ 
FLASH_BIOS_IMAGE_MAX_SIZE_g3}, ++ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS, ++ FLASH_BIOS_IMAGE_MAX_SIZE_g3}, ++ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS, ++ FLASH_BIOS_IMAGE_MAX_SIZE_g3}, ++ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP, ++ FLASH_IMAGE_MAX_SIZE_g3}, ++ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE, ++ FLASH_IMAGE_MAX_SIZE_g3}, ++ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP, ++ FLASH_IMAGE_MAX_SIZE_g3}, ++ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW, ++ FLASH_NCSI_IMAGE_MAX_SIZE_g3}, ++ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW, ++ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3} ++ }; ++ struct flash_comp gen2_flash_types[8] = { ++ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE, ++ FLASH_IMAGE_MAX_SIZE_g2}, ++ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT, ++ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2}, ++ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS, ++ FLASH_BIOS_IMAGE_MAX_SIZE_g2}, ++ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS, ++ FLASH_BIOS_IMAGE_MAX_SIZE_g2}, ++ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS, ++ FLASH_BIOS_IMAGE_MAX_SIZE_g2}, ++ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP, ++ FLASH_IMAGE_MAX_SIZE_g2}, ++ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE, ++ FLASH_IMAGE_MAX_SIZE_g2}, ++ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP, ++ FLASH_IMAGE_MAX_SIZE_g2} ++ }; ++ if (adapter->generation == BE_GEN3) { ++ pflashcomp = gen3_flash_types; ++ filehdr_size = sizeof(struct flash_file_hdr_g3); ++ num_comp = ARRAY_SIZE(gen3_flash_types); ++ } else { ++ pflashcomp = gen2_flash_types; ++ filehdr_size = sizeof(struct flash_file_hdr_g2); ++ num_comp = ARRAY_SIZE(gen2_flash_types); + } ++ for (i = 0; i < num_comp; i++) { ++ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) && ++ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) ++ continue; ++ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) { ++ if (!phy_flashing_required(adapter)) ++ continue; ++ } ++ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && ++ (!be_flash_redboot(adapter, fw->data, ++ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size + ++ (num_of_images * sizeof(struct image_hdr))))) ++ continue; + +- p += sizeof(struct flash_file_hdr) + image_offset; +- if (p + image_size > fw->data + fw->size) +- return -1; +- +- total_bytes = image_size; +- +- while (total_bytes) { +- if (total_bytes > 32*1024) +- num_bytes = 32*1024; +- else +- num_bytes = total_bytes; +- total_bytes -= num_bytes; +- +- if (!total_bytes) +- flash_op = FLASHROM_OPER_FLASH; +- else +- flash_op = FLASHROM_OPER_SAVE; +- memcpy(req->params.data_buf, p, num_bytes); +- p += num_bytes; +- status = be_cmd_write_flashrom(adapter, flash_cmd, +- flash_type, flash_op, num_bytes); +- if (status) { +- dev_err(&adapter->pdev->dev, +- "cmd to write to flash rom failed. 
type/op %d/%d\n", +- flash_type, flash_op); ++ p = fw->data; ++ p += filehdr_size + pflashcomp[i].offset ++ + (num_of_images * sizeof(struct image_hdr)); ++ if (p + pflashcomp[i].size > fw->data + fw->size) + return -1; ++ total_bytes = pflashcomp[i].size; ++ while (total_bytes) { ++ if (total_bytes > 32*1024) ++ num_bytes = 32*1024; ++ else ++ num_bytes = total_bytes; ++ total_bytes -= num_bytes; ++ if (!total_bytes) { ++ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) ++ flash_op = FLASHROM_OPER_PHY_FLASH; ++ else ++ flash_op = FLASHROM_OPER_FLASH; ++ } else { ++ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) ++ flash_op = FLASHROM_OPER_PHY_SAVE; ++ else ++ flash_op = FLASHROM_OPER_SAVE; ++ } ++ memcpy(req->params.data_buf, p, num_bytes); ++ p += num_bytes; ++ status = be_cmd_write_flashrom(adapter, flash_cmd, ++ pflashcomp[i].optype, flash_op, num_bytes); ++ if (status) { ++ if ((status == ILLEGAL_IOCTL_REQ) && ++ (pflashcomp[i].optype == ++ IMG_TYPE_PHY_FW)) ++ break; ++ dev_err(&adapter->pdev->dev, ++ "cmd to write to flash rom failed.\n"); ++ return -1; ++ } ++ yield(); + } +- yield(); + } +- + return 0; + } + ++static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr) ++{ ++ if (fhdr == NULL) ++ return 0; ++ if (fhdr->build[0] == '3') ++ return BE_GEN3; ++ else if (fhdr->build[0] == '2') ++ return BE_GEN2; ++ else ++ return 0; ++} ++ + int be_load_fw(struct be_adapter *adapter, u8 *func) + { + char fw_file[ETHTOOL_FLASH_MAX_FILENAME]; + const struct firmware *fw; +- struct flash_file_hdr *fhdr; +- struct flash_section_info *fsec = NULL; ++ struct flash_file_hdr_g2 *fhdr; ++ struct flash_file_hdr_g3 *fhdr3; ++ struct image_hdr *img_hdr_ptr = NULL; + struct be_dma_mem flash_cmd; +- int status; ++ int status, i = 0, num_imgs = 0; + const u8 *p; +- bool entry_found = false; +- int flash_type; +- char fw_ver[FW_VER_LEN]; +- char fw_cfg; + +- status = be_cmd_get_fw_ver(adapter, fw_ver); +- if (status) +- return status; ++ if (!netif_running(adapter->netdev)) { ++ dev_err(&adapter->pdev->dev, ++ "Firmware load not allowed (interface is down)\n"); ++ return -1; ++ } + +- fw_cfg = *(fw_ver + 2); +- if (fw_cfg == '0') +- fw_cfg = '1'; + strcpy(fw_file, func); + + status = request_firmware(&fw, fw_file, &adapter->pdev->dev); +@@ -1826,34 +3202,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) + goto fw_exit; + + p = fw->data; +- fhdr = (struct flash_file_hdr *) p; +- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) { +- dev_err(&adapter->pdev->dev, +- "Firmware(%s) load error (signature did not match)\n", +- fw_file); +- status = -1; +- goto fw_exit; +- } +- ++ fhdr = (struct flash_file_hdr_g2 *) p; + dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); + +- p += sizeof(struct flash_file_hdr); +- while (p < (fw->data + fw->size)) { +- fsec = (struct flash_section_info *)p; +- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) { +- entry_found = true; +- break; +- } +- p += 32; +- } +- +- if (!entry_found) { +- status = -1; +- dev_err(&adapter->pdev->dev, +- "Flash cookie not found in firmware image\n"); +- goto fw_exit; +- } +- + flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; + flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, + &flash_cmd.dma); +@@ -1864,12 +3215,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) + goto fw_exit; + } + +- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE; +- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) { +- status = be_flash_image(adapter, fw, &flash_cmd, +- 
flash_type); +- if (status) +- break; ++ if ((adapter->generation == BE_GEN3) && ++ (get_ufigen_type(fhdr) == BE_GEN3)) { ++ fhdr3 = (struct flash_file_hdr_g3 *) fw->data; ++ num_imgs = le32_to_cpu(fhdr3->num_imgs); ++ for (i = 0; i < num_imgs; i++) { ++ img_hdr_ptr = (struct image_hdr *) (fw->data + ++ (sizeof(struct flash_file_hdr_g3) + ++ i * sizeof(struct image_hdr))); ++ if (le32_to_cpu(img_hdr_ptr->imageid) == 1) ++ status = be_flash_data(adapter, fw, &flash_cmd, ++ num_imgs); ++ } ++ } else if ((adapter->generation == BE_GEN2) && ++ (get_ufigen_type(fhdr) == BE_GEN2)) { ++ status = be_flash_data(adapter, fw, &flash_cmd, 0); ++ } else { ++ dev_err(&adapter->pdev->dev, ++ "UFI and Interface are not compatible for flashing\n"); ++ status = -1; + } + + pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, +@@ -1879,14 +3243,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) + goto fw_exit; + } + +- dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n"); ++ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); + + fw_exit: + release_firmware(fw); + return status; + } + +-static struct net_device_ops be_netdev_ops = { ++static net_device_ops_no_const be_netdev_ops = { + .ndo_open = be_open, + .ndo_stop = be_close, + .ndo_start_xmit = be_xmit, +@@ -1898,15 +3262,32 @@ static struct net_device_ops be_netdev_ops = { + .ndo_vlan_rx_register = be_vlan_register, + .ndo_vlan_rx_add_vid = be_vlan_add_vid, + .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, ++#ifdef HAVE_SRIOV_CONFIG ++ .ndo_set_vf_mac = be_set_vf_mac, ++ .ndo_set_vf_vlan = be_set_vf_vlan, ++ .ndo_set_vf_tx_rate = be_set_vf_tx_rate, ++ .ndo_get_vf_config = be_get_vf_config, ++#endif ++ .ndo_do_ioctl = be_do_ioctl, ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ .ndo_poll_controller = be_netpoll, ++#endif + }; + +-static void be_netdev_init(struct net_device *netdev) ++static int be_netdev_init(struct net_device *netdev) + { + struct be_adapter *adapter = netdev_priv(netdev); ++ struct be_rx_obj *rxo; ++ int i, status = 0; + + netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | +- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | +- NETIF_F_GRO; ++ NETIF_F_HW_VLAN_TX | NETIF_F_HW_CSUM | NETIF_F_TSO6; ++ ++ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | ++ NETIF_F_HW_CSUM; ++ ++ netdev->features |= NETIF_F_VLAN_SG | NETIF_F_VLAN_TSO | ++ NETIF_F_VLAN_CSUM; + + netdev->flags |= IFF_MULTICAST; + +@@ -1918,17 +3299,30 @@ static void be_netdev_init(struct net_device *netdev) + + netif_set_gso_max_size(netdev, 65535); + ++ if (adapter->flags & BE_FLAGS_DCBX) ++ be_netdev_ops.ndo_select_queue = be_select_queue; + BE_SET_NETDEV_OPS(netdev, &be_netdev_ops); +- ++ + SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); + +- netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, +- BE_NAPI_WEIGHT); +- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, ++ be_lro_init(adapter, netdev); ++ ++ for_all_rx_queues(adapter, rxo, i) { ++ status = be_netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx, ++ BE_NAPI_WEIGHT); ++ if (status) { ++ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail" ++ "for rxo:%d\n", i); ++ return status; ++ } ++ } ++ status = be_netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, + BE_NAPI_WEIGHT); ++ if (status) ++ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail" ++ "for tx\n"); + +- netif_carrier_off(netdev); +- netif_stop_queue(netdev); ++ return status; + } + + static void be_unmap_pci_bars(struct be_adapter *adapter) +@@ -1937,37 
+3331,62 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) + iounmap(adapter->csr); + if (adapter->db) + iounmap(adapter->db); +- if (adapter->pcicfg) ++ if (adapter->pcicfg && be_physfn(adapter)) + iounmap(adapter->pcicfg); + } + + static int be_map_pci_bars(struct be_adapter *adapter) + { ++ struct pci_dev *pdev = adapter->pdev; + u8 __iomem *addr; +- int pcicfg_reg; ++ int pcicfg_reg, db_reg; + +- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), +- pci_resource_len(adapter->pdev, 2)); +- if (addr == NULL) +- return -ENOMEM; +- adapter->csr = addr; ++ if (lancer_chip(adapter)) { ++ addr = ioremap_nocache(pci_resource_start(pdev, 0), ++ pci_resource_len(adapter->pdev, 0)); ++ if (addr == NULL) ++ return -ENOMEM; ++ adapter->db = addr; ++ return 0; ++ } + +- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), +- 128 * 1024); +- if (addr == NULL) +- goto pci_map_err; +- adapter->db = addr; ++ if (be_physfn(adapter)) { ++ addr = ioremap_nocache(pci_resource_start(pdev, 2), ++ pci_resource_len(pdev, 2)); ++ if (addr == NULL) ++ return -ENOMEM; ++ adapter->csr = addr; ++ adapter->netdev->mem_start = pci_resource_start(pdev, 2); ++ adapter->netdev->mem_end = pci_resource_start(pdev, 2) + ++ pci_resource_len(pdev, 2); ++ } + +- if (adapter->generation == BE_GEN2) ++ if (adapter->generation == BE_GEN2) { + pcicfg_reg = 1; +- else ++ db_reg = 4; ++ } else { + pcicfg_reg = 0; ++ if (be_physfn(adapter)) ++ db_reg = 4; ++ else ++ db_reg = 0; ++ } + +- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg), +- pci_resource_len(adapter->pdev, pcicfg_reg)); ++ addr = ioremap_nocache(pci_resource_start(pdev, db_reg), ++ pci_resource_len(pdev, db_reg)); + if (addr == NULL) + goto pci_map_err; +- adapter->pcicfg = addr; ++ adapter->db = addr; ++ ++ if (be_physfn(adapter)) { ++ addr = ioremap_nocache( ++ pci_resource_start(pdev, pcicfg_reg), ++ pci_resource_len(pdev, pcicfg_reg)); ++ if (addr == NULL) ++ goto pci_map_err; ++ adapter->pcicfg = addr; ++ } else ++ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + + return 0; + pci_map_err: +@@ -1985,40 +3404,69 @@ static void be_ctrl_cleanup(struct be_adapter *adapter) + if (mem->va) + pci_free_consistent(adapter->pdev, mem->size, + mem->va, mem->dma); ++ ++ mem = &adapter->rx_filter; ++ if (mem->va) ++ pci_free_consistent(adapter->pdev, mem->size, ++ mem->va, mem->dma); + } + + static int be_ctrl_init(struct be_adapter *adapter) + { + struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; + struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; ++ struct be_dma_mem *rx_filter = &adapter->rx_filter; + int status; + + status = be_map_pci_bars(adapter); + if (status) +- return status; ++ goto done; + + mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; + mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, + mbox_mem_alloc->size, &mbox_mem_alloc->dma); + if (!mbox_mem_alloc->va) { +- be_unmap_pci_bars(adapter); +- return -1; ++ status = -ENOMEM; ++ goto unmap_pci_bars; + } ++ + mbox_mem_align->size = sizeof(struct be_mcc_mailbox); + mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); + mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); + memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); +- spin_lock_init(&adapter->mbox_lock); ++ ++ rx_filter->size = sizeof(struct be_cmd_req_rx_filter); ++ rx_filter->va = pci_alloc_consistent(adapter->pdev, rx_filter->size, ++ &rx_filter->dma); ++ if (rx_filter->va == NULL) { ++ status = -ENOMEM; ++ goto free_mbox; ++ } ++ 
memset(rx_filter->va, 0, rx_filter->size); ++ ++ mutex_init(&adapter->mbox_lock); + spin_lock_init(&adapter->mcc_lock); + spin_lock_init(&adapter->mcc_cq_lock); + ++ init_completion(&adapter->flash_compl); ++ ++ PCI_SAVE_STATE(adapter->pdev); + return 0; ++ ++free_mbox: ++ pci_free_consistent(adapter->pdev, mbox_mem_alloc->size, ++ mbox_mem_alloc->va, mbox_mem_alloc->dma); ++ ++unmap_pci_bars: ++ be_unmap_pci_bars(adapter); ++ ++done: ++ return status; + } + + static void be_stats_cleanup(struct be_adapter *adapter) + { +- struct be_stats_obj *stats = &adapter->stats; +- struct be_dma_mem *cmd = &stats->cmd; ++ struct be_dma_mem *cmd = &adapter->stats_cmd; + + if (cmd->va) + pci_free_consistent(adapter->pdev, cmd->size, +@@ -2027,10 +3475,12 @@ static void be_stats_cleanup(struct be_adapter *adapter) + + static int be_stats_init(struct be_adapter *adapter) + { +- struct be_stats_obj *stats = &adapter->stats; +- struct be_dma_mem *cmd = &stats->cmd; ++ struct be_dma_mem *cmd = &adapter->stats_cmd; + +- cmd->size = sizeof(struct be_cmd_req_get_stats); ++ if (adapter->generation == BE_GEN2) ++ cmd->size = sizeof(struct be_cmd_req_get_stats_v0); ++ else ++ cmd->size = sizeof(struct be_cmd_req_get_stats_v1); + cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); + if (cmd->va == NULL) + return -1; +@@ -2041,9 +3491,17 @@ static int be_stats_init(struct be_adapter *adapter) + static void __devexit be_remove(struct pci_dev *pdev) + { + struct be_adapter *adapter = pci_get_drvdata(pdev); ++ + if (!adapter) + return; + ++ cancel_delayed_work_sync(&adapter->work); ++ ++#ifdef CONFIG_PALAU ++ be_sysfs_remove_group(adapter); ++#endif ++ ++ /* be_close() gets called if the device is open by unregister */ + unregister_netdev(adapter->netdev); + + be_clear(adapter); +@@ -2052,36 +3510,203 @@ static void __devexit be_remove(struct pci_dev *pdev) + + be_ctrl_cleanup(adapter); + +- if (adapter->msix_enabled) { +- pci_disable_msix(adapter->pdev); +- adapter->msix_enabled = false; +- } ++ kfree(adapter->vf_cfg); ++ be_sriov_disable(adapter); ++ ++ be_msix_disable(adapter); + + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + pci_disable_device(pdev); +- ++ be_netif_napi_del(adapter->netdev); + free_netdev(adapter->netdev); + } + +-static int be_hw_up(struct be_adapter *adapter) ++static void be_pcie_slot_check(struct be_adapter *adapter) ++{ ++ u32 curr, max, width, max_wd, speed, max_sp; ++ ++ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_STATUS_OFFSET, ++ &curr); ++ width = (curr >> PCIE_LINK_STATUS_NEG_WIDTH_SHIFT) & ++ PCIE_LINK_STATUS_NEG_WIDTH_MASK; ++ speed = (curr >> PCIE_LINK_STATUS_SPEED_SHIFT) & ++ PCIE_LINK_STATUS_SPEED_MASK; ++ ++ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_CAP_OFFSET, ++ &max); ++ max_wd = (max >> PCIE_LINK_CAP_MAX_WIDTH_SHIFT) & ++ PCIE_LINK_CAP_MAX_WIDTH_MASK; ++ max_sp = (max >> PCIE_LINK_CAP_MAX_SPEED_SHIFT) & ++ PCIE_LINK_CAP_MAX_SPEED_MASK; ++ ++ if (width < max_wd || speed < max_sp) ++ dev_warn(&adapter->pdev->dev, ++ "Found network device in a Gen%s x%d PCIe slot. It " ++ "should be in a Gen2 x%d slot for best performance\n", ++ speed < max_sp ? 
"1" : "2", width, max_wd); ++} ++ ++static int be_get_ioctl_version(char *fw_version) { ++ char *str[4]; ++ int i; ++ int val[4]; ++ char *endptr; ++ ++ if(!fw_version) ++ return 0; ++ for(i=0; i<3; i++) { ++ str[i] = strsep(&fw_version, "."); ++ val[i] = simple_strtol(str[i], &endptr, 10); ++ } ++ ++ if (val[0]>4 || (val[0]>3 && val[2]>143)) ++ return 1; ++ return 0; ++} ++ ++static int be_get_port_names(struct be_adapter *adapter) + { + int status; ++ int ver; + +- status = be_cmd_POST(adapter); ++ status = be_cmd_get_fw_ver(adapter, ++ adapter->fw_ver, NULL); + if (status) + return status; ++ ver = be_get_ioctl_version(adapter->fw_ver); ++ if (ver && (adapter->generation == BE_GEN3)) ++ status = be_cmd_query_port_names_v1(adapter, ++ adapter->port_name); ++ else ++ status = be_cmd_query_port_names_v0(adapter, ++ adapter->port_name); ++ return status; ++} + +- status = be_cmd_reset_function(adapter); ++static int be_get_config(struct be_adapter *adapter) ++{ ++ int status; ++ u8 mac[ETH_ALEN]; ++ ++ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, ++ &adapter->function_mode, ++ &adapter->function_caps); + if (status) + return status; + +- status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); ++ status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + +- status = be_cmd_query_fw_cfg(adapter, +- &adapter->port_num, &adapter->cap); ++ memset(mac, 0, ETH_ALEN); ++ be_pcie_slot_check(adapter); ++ ++ if (be_physfn(adapter)) { ++ status = be_cmd_mac_addr_query(adapter, mac, ++ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0); ++ ++ if (status) ++ return status; ++ ++ if (!is_valid_ether_addr(mac)) ++ return -EADDRNOTAVAIL; ++ ++ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); ++ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); ++ } ++ ++ if (adapter->function_mode & FLEX10_MODE) ++ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8; ++ else ++ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; ++ ++ return 0; ++} ++ ++static int be_dev_family_check(struct be_adapter *adapter) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ u32 sli_intf = 0, if_type; ++ ++ switch (pdev->device) { ++ case BE_DEVICE_ID1: ++ case OC_DEVICE_ID1: ++ adapter->generation = BE_GEN2; ++ break; ++ case BE_DEVICE_ID2: ++ case OC_DEVICE_ID2: ++ adapter->generation = BE_GEN3; ++ break; ++ case OC_DEVICE_ID3: ++ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf); ++ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> ++ SLI_INTF_IF_TYPE_SHIFT; ++ ++ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) || ++ if_type != 0x02) { ++ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n"); ++ return -EINVAL; ++ } ++ if (num_vfs > 0) { ++ dev_err(&pdev->dev, "VFs not supported\n"); ++ return -EINVAL; ++ } ++ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >> ++ SLI_INTF_FAMILY_SHIFT); ++ adapter->generation = BE_GEN3; ++ break; ++ default: ++ adapter->generation = 0; ++ } ++ return 0; ++} ++ ++static int lancer_wait_ready(struct be_adapter *adapter) ++{ ++#define SLIPORT_READY_TIMEOUT 500 ++ u32 sliport_status; ++ int status = 0, i; ++ ++ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { ++ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); ++ if (sliport_status & SLIPORT_STATUS_RDY_MASK) ++ break; ++ ++ msleep(20); ++ } ++ ++ if (i == SLIPORT_READY_TIMEOUT) ++ status = -1; ++ ++ return status; ++} ++ ++static int lancer_test_and_set_rdy_state(struct be_adapter *adapter) ++{ ++ int status; ++ u32 sliport_status, err, reset_needed; ++ status = lancer_wait_ready(adapter); ++ if 
(!status) { ++ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); ++ err = sliport_status & SLIPORT_STATUS_ERR_MASK; ++ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; ++ if (err && reset_needed) { ++ iowrite32(SLI_PORT_CONTROL_IP_MASK, ++ adapter->db + SLIPORT_CONTROL_OFFSET); ++ ++ /* check adapter has corrected the error */ ++ status = lancer_wait_ready(adapter); ++ sliport_status = ioread32(adapter->db + ++ SLIPORT_STATUS_OFFSET); ++ sliport_status &= (SLIPORT_STATUS_ERR_MASK | ++ SLIPORT_STATUS_RN_MASK); ++ if (status || sliport_status) ++ status = -1; ++ } else if (err || reset_needed) { ++ status = -1; ++ } ++ } + return status; + } + +@@ -2091,7 +3716,7 @@ static int __devinit be_probe(struct pci_dev *pdev, + int status = 0; + struct be_adapter *adapter; + struct net_device *netdev; +- u8 mac[ETH_ALEN]; ++ u32 en; + + status = pci_enable_device(pdev); + if (status) +@@ -2102,31 +3727,22 @@ static int __devinit be_probe(struct pci_dev *pdev, + goto disable_dev; + pci_set_master(pdev); + +- netdev = alloc_etherdev(sizeof(struct be_adapter)); ++ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS); + if (netdev == NULL) { + status = -ENOMEM; + goto rel_reg; + } + adapter = netdev_priv(netdev); + +- switch (pdev->device) { +- case BE_DEVICE_ID1: +- case OC_DEVICE_ID1: +- adapter->generation = BE_GEN2; +- break; +- case BE_DEVICE_ID2: +- case OC_DEVICE_ID2: +- adapter->generation = BE_GEN3; +- break; +- default: +- adapter->generation = 0; +- } +- + adapter->pdev = pdev; ++ ++ status = be_dev_family_check(adapter); ++ if (status) ++ goto free_netdev; ++ + pci_set_drvdata(pdev, adapter); + adapter->netdev = netdev; +- +- be_msix_enable(adapter); ++ SET_NETDEV_DEV(netdev, &pdev->dev); + + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!status) { +@@ -2139,46 +3755,150 @@ static int __devinit be_probe(struct pci_dev *pdev, + } + } + ++ be_sriov_enable(adapter); ++ if (adapter->num_vfs > 0) { ++ adapter->vf_cfg = kcalloc(adapter->num_vfs, ++ sizeof(struct be_vf_cfg), GFP_KERNEL); ++ ++ if (!adapter->vf_cfg) ++ goto free_netdev; ++ } ++ + status = be_ctrl_init(adapter); + if (status) +- goto free_netdev; ++ goto free_vf_cfg; ++ ++ if (lancer_chip(adapter)) { ++ status = lancer_test_and_set_rdy_state(adapter); ++ if (status) { ++ dev_err(&pdev->dev, "Adapter in non recoverable error\n"); ++ goto ctrl_clean; ++ } ++ } ++ ++ /* sync up with fw's ready state */ ++ if (be_physfn(adapter)) { ++ status = be_cmd_POST(adapter); ++ if (status) ++ goto ctrl_clean; ++ } ++ ++ /* tell fw we're ready to fire cmds */ ++ status = be_cmd_fw_init(adapter); ++ if (status) ++ goto ctrl_clean; ++ ++ status = be_cmd_reset_function(adapter); ++ if (status) ++ goto ctrl_clean; + + status = be_stats_init(adapter); + if (status) + goto ctrl_clean; + +- status = be_hw_up(adapter); ++ status = be_get_config(adapter); + if (status) + goto stats_clean; + +- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, +- true /* permanent */, 0); +- if (status) +- goto stats_clean; +- memcpy(netdev->dev_addr, mac, ETH_ALEN); ++ /* This bit is zero in normal boot case, but in crash kernel case this ++ is not cleared. 
Clear this bit here, until we are ready with the irqs,
++ i.e. in be_open call. */
++ if (!lancer_chip(adapter))
++ be_intr_set(adapter, false);
++
++ if (msix)
++ be_msix_enable(adapter);
+
+ INIT_DELAYED_WORK(&adapter->work, be_worker);
+- be_netdev_init(netdev);
+- SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+
+ status = be_setup(adapter);
+ if (status)
+- goto stats_clean;
++ goto msix_disable;
++
++ /* Initialize the link status to -1 */
++ adapter->link_status = -1;
++
++ status = be_netdev_init(netdev);
++ if (status)
++ goto unsetup;
++
+ status = register_netdev(netdev);
+ if (status != 0)
+ goto unsetup;
+
+- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
++ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
++
++ if (be_physfn(adapter) && adapter->num_vfs) {
++ u8 mac_speed;
++ int link_status;
++ u16 def_vlan, vf, lnk_speed;
++
++ status = be_vf_eth_addr_config(adapter);
++ if (status)
++ goto unreg_netdev;
++
++ for (vf = 0; vf < adapter->num_vfs; vf++) {
++ status = be_cmd_get_hsw_config(adapter, &def_vlan,
++ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
++ if (!status)
++ adapter->vf_cfg[vf].vf_def_vid = def_vlan;
++ else
++ goto unreg_netdev;
++
++ status = be_cmd_link_status_query(adapter, &link_status,
++ &mac_speed, &lnk_speed, vf + 1);
++ if (!status)
++ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
++ else
++ goto unreg_netdev;
++ }
++ }
++ if (be_physfn(adapter)) {
++ /* Temp fix for bug# 23034, till ARM
++ * f/w fixes privilege lvl */
++ be_get_port_names(adapter);
++ }
++
++ /* Enable VLAN capability based on privileges.
++ * PF will have VLAN capability anyway. */
++ be_cmd_get_fn_privileges(adapter, &en, 0);
++
++ if ((en & (BE_PRIV_FILTMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG)) ||
++ be_physfn(adapter))
++ netdev->features |= NETIF_F_HW_VLAN_FILTER;
++ else
++ netdev->features |= NETIF_F_VLAN_CHALLENGED;
++
++ dev_info(&pdev->dev, "%s: numa node %d\n", netdev->name,
++ dev_to_node(&pdev->dev));
++ dev_info(&pdev->dev, "%s %s \"%s\" port %d\n", nic_name(pdev),
++ (adapter->port_num > 1 ? 
"1Gbps NIC" : "10Gbps NIC"), ++ adapter->model_number, adapter->hba_port_num); ++ ++ ++#ifdef CONFIG_PALAU ++ be_sysfs_create_group(adapter); ++#endif ++ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); + return 0; + ++unreg_netdev: ++ unregister_netdev(netdev); + unsetup: + be_clear(adapter); ++msix_disable: ++ be_msix_disable(adapter); + stats_clean: + be_stats_cleanup(adapter); + ctrl_clean: + be_ctrl_cleanup(adapter); ++free_vf_cfg: ++ kfree(adapter->vf_cfg); + free_netdev: +- free_netdev(adapter->netdev); ++ be_sriov_disable(adapter); ++ be_netif_napi_del(netdev); ++ free_netdev(netdev); ++ pci_set_drvdata(pdev, NULL); + rel_reg: + pci_release_regions(pdev); + disable_dev: +@@ -2193,6 +3913,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) + struct be_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + ++ cancel_delayed_work_sync(&adapter->work); ++ if (adapter->wol) ++ be_setup_wol(adapter, true); ++ + netif_device_detach(netdev); + if (netif_running(netdev)) { + rtnl_lock(); +@@ -2202,6 +3926,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) + be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc); + be_clear(adapter); + ++ be_msix_disable(adapter); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); +@@ -2223,6 +3948,12 @@ static int be_resume(struct pci_dev *pdev) + pci_set_power_state(pdev, 0); + pci_restore_state(pdev); + ++ be_msix_enable(adapter); ++ /* tell fw we're ready to fire cmds */ ++ status = be_cmd_fw_init(adapter); ++ if (status) ++ return status; ++ + be_setup(adapter); + if (netif_running(netdev)) { + rtnl_lock(); +@@ -2230,28 +3961,152 @@ static int be_resume(struct pci_dev *pdev) + rtnl_unlock(); + } + netif_device_attach(netdev); ++ ++ if (adapter->wol) ++ be_setup_wol(adapter, false); ++ ++ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); + return 0; + } + ++/* ++ * An FLR will stop BE from DMAing any data. 
++ */ ++static void be_shutdown(struct pci_dev *pdev) ++{ ++ struct be_adapter *adapter = pci_get_drvdata(pdev); ++ ++ if (!adapter) ++ return; ++ ++ cancel_delayed_work_sync(&adapter->work); ++ ++ netif_device_detach(adapter->netdev); ++ ++ if (adapter->wol) ++ be_setup_wol(adapter, true); ++ ++ be_cmd_reset_function(adapter); ++ ++ pci_disable_device(pdev); ++} ++ ++static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, ++ pci_channel_state_t state) ++{ ++ struct be_adapter *adapter = pci_get_drvdata(pdev); ++ struct net_device *netdev = adapter->netdev; ++ ++ dev_err(&adapter->pdev->dev, "EEH error detected\n"); ++ ++ adapter->eeh_err = true; ++ ++ netif_device_detach(netdev); ++ ++ if (netif_running(netdev)) { ++ rtnl_lock(); ++ be_close(netdev); ++ rtnl_unlock(); ++ } ++ be_clear(adapter); ++ ++ if (state == pci_channel_io_perm_failure) ++ return PCI_ERS_RESULT_DISCONNECT; ++ ++ pci_disable_device(pdev); ++ ++ return PCI_ERS_RESULT_NEED_RESET; ++} ++ ++static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) ++{ ++ struct be_adapter *adapter = pci_get_drvdata(pdev); ++ int status; ++ ++ dev_info(&adapter->pdev->dev, "EEH reset\n"); ++ adapter->eeh_err = false; ++ ++ status = pci_enable_device(pdev); ++ if (status) ++ return PCI_ERS_RESULT_DISCONNECT; ++ ++ pci_set_master(pdev); ++ pci_set_power_state(pdev, 0); ++ pci_restore_state(pdev); ++ ++ /* Check if card is ok and fw is ready */ ++ status = be_cmd_POST(adapter); ++ if (status) ++ return PCI_ERS_RESULT_DISCONNECT; ++ ++ return PCI_ERS_RESULT_RECOVERED; ++} ++ ++static void be_eeh_resume(struct pci_dev *pdev) ++{ ++ int status = 0; ++ struct be_adapter *adapter = pci_get_drvdata(pdev); ++ struct net_device *netdev = adapter->netdev; ++ ++ dev_info(&adapter->pdev->dev, "EEH resume\n"); ++ ++ pci_save_state(pdev); ++ ++ /* tell fw we're ready to fire cmds */ ++ status = be_cmd_fw_init(adapter); ++ if (status) ++ goto err; ++ ++ status = be_setup(adapter); ++ if (status) ++ goto err; ++ ++ if (netif_running(netdev)) { ++ status = be_open(netdev); ++ if (status) ++ goto err; ++ } ++ netif_device_attach(netdev); ++ return; ++err: ++ dev_err(&adapter->pdev->dev, "EEH resume failed\n"); ++ return; ++} ++ ++static struct pci_error_handlers be_eeh_handlers = { ++ .error_detected = be_eeh_err_detected, ++ .slot_reset = be_eeh_reset, ++ .resume = be_eeh_resume, ++}; ++ + static struct pci_driver be_driver = { + .name = DRV_NAME, + .id_table = be_dev_ids, + .probe = be_probe, + .remove = be_remove, + .suspend = be_suspend, +- .resume = be_resume ++ .resume = be_resume, ++ .shutdown = be_shutdown, ++ .err_handler = &be_eeh_handlers + }; + + static int __init be_init_module(void) + { +- if (rx_frag_size != 8192 && rx_frag_size != 4096 +- && rx_frag_size != 2048) { ++ if (rx_frag_size != 8192 && rx_frag_size != 4096 && ++ rx_frag_size != 2048) { + printk(KERN_WARNING DRV_NAME + " : Module param rx_frag_size must be 2048/4096/8192." + " Using 2048\n"); + rx_frag_size = 2048; + } + ++ if (!msix && num_vfs > 0) { ++ printk(KERN_WARNING DRV_NAME ++ " : MSIx required for num_vfs > 0. Ignoring msix=0\n"); ++ msix = 1; ++ } ++ ++ + return pci_register_driver(&be_driver); + } + module_init(be_init_module); +diff --git a/drivers/net/benet/be_misc.c b/drivers/net/benet/be_misc.c +new file mode 100644 +index 0000000..4ab499f +--- /dev/null ++++ b/drivers/net/benet/be_misc.c +@@ -0,0 +1,106 @@ ++/* ++ * Copyright (C) 2005 - 2011 Emulex ++ * All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License version 2 ++ * as published by the Free Software Foundation. The full GNU General ++ * Public License is included in this distribution in the file called COPYING. ++ * ++ * Contact Information: ++ * linux-drivers@emulex.com ++ * ++ * Emulex ++ * 3333 Susan Street ++ * Costa Mesa, CA 92626 ++ */ ++#include "be.h" ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) ++static ssize_t ++flash_fw_store(struct class_device *cd, const char *buf, size_t len) ++{ ++ struct be_adapter *adapter = ++ netdev_priv(container_of(cd, struct net_device, class_dev)); ++ char file_name[ETHTOOL_FLASH_MAX_FILENAME]; ++ int status; ++ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; ++ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1)); ++ ++ /* Removing new-line char given by sysfs */ ++ file_name[strlen(file_name) - 1] = '\0'; ++ ++ status = be_load_fw(adapter, file_name); ++ if (!status) ++ return len; ++ else ++ return status; ++} ++ ++static CLASS_DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store); ++ ++static struct attribute *benet_attrs[] = { ++ &class_device_attr_flash_fw.attr, ++ NULL, ++}; ++#else ++ ++static ssize_t ++flash_fw_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t len) ++{ ++ struct be_adapter *adapter = ++ netdev_priv(container_of(dev, struct net_device, dev)); ++ char file_name[ETHTOOL_FLASH_MAX_FILENAME]; ++ int status; ++ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; ++ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1)); ++ ++ /* Removing new-line char given by sysfs */ ++ file_name[strlen(file_name) - 1] = '\0'; ++ ++ status = be_load_fw(adapter, file_name); ++ if (!status) ++ return len; ++ else ++ return status; ++} ++ ++static DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store); ++ ++static struct attribute *benet_attrs[] = { ++ &dev_attr_flash_fw.attr, ++ NULL, ++}; ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) ++#define CLASS_DEV class_dev ++#else ++#define CLASS_DEV dev ++#endif ++ ++static struct attribute_group benet_attr_group = {.attrs = benet_attrs }; ++ ++void be_sysfs_create_group(struct be_adapter *adapter) ++{ ++ int status; ++ ++ status = sysfs_create_group(&adapter->netdev->CLASS_DEV.kobj, ++ &benet_attr_group); ++ if (status) ++ dev_err(&adapter->pdev->dev, "Could not create sysfs group\n"); ++} ++ ++void be_sysfs_remove_group(struct be_adapter *adapter) ++{ ++ sysfs_remove_group(&adapter->netdev->CLASS_DEV.kobj, &benet_attr_group); ++} +diff --git a/drivers/net/benet/be_proc.c b/drivers/net/benet/be_proc.c +new file mode 100644 +index 0000000..0bfdb3b +--- /dev/null ++++ b/drivers/net/benet/be_proc.c +@@ -0,0 +1,513 @@ ++/* ++ * Copyright (C) 2005 - 2011 ServerEngines ++ * All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License version 2 ++ * as published by the Free Software Foundation. The full GNU General ++ * Public License is included in this distribution in the file called COPYING. ++ * ++ * Contact Information: ++ * linux-drivers@serverengines.com ++ * ++ * ServerEngines ++ * 209 N. 
Fair Oaks Ave ++ * Sunnyvale, CA 94085 ++ */ ++#include <linux/proc_fs.h> ++#include "be.h" ++ ++char *be_adpt_name[] = { ++ "driver/be2net0", ++ "driver/be2net1", ++ "driver/be2net2", ++ "driver/be2net3", ++ "driver/be2net4", ++ "driver/be2net5", ++ "driver/be2net6", ++ "driver/be2net7" ++}; ++ ++#define MAX_BE_DEVICES 8 ++struct proc_dir_entry *be_proc_dir[MAX_BE_DEVICES]; ++ ++/*File to read Eth Ring Information */ ++#define BE_ETH_RING_FILE "eth_ring" ++#define BE_DRVR_STAT_FILE "drvr_stat" ++ ++/* ++ * this file enables user to read a 32 bit CSR register. ++ * to read 32 bit value of a register at offset 0x1234, ++ * first write the offset 0x1234 (echo "0x1234") in ++ * the file and then read the value from this file. ++ * the written offset is latched until another value is written ++ */ ++#define BE_CSR_R_FILE "csrr" ++/* ++ * this file enables user to write to a 32 bit CSR register. ++ * to write a value 0xdeadbeef to a register at offset 0x1234, ++ * write 0x1234 0xdeadbeef (echo "0x1234 0xdeadbeeb") to ++ * the file. ++ */ ++#define BE_CSR_W_FILE "csrw" ++ ++#define BE_PROC_MODE 0600 ++ ++static char read_eth_ring_buf[4096]; ++static int read_eth_ring_count; ++ ++/* ++ * Get Various Eth Ring Properties ++ */ ++static int proc_eth_read_ring(char *page, char **start, ++ off_t off, int count, int *eof, void *data) ++{ ++ int i, n; ++ char *p = read_eth_ring_buf; ++ struct be_adapter *adapter = (struct be_adapter *) data; ++ ++ if (off == 0) { ++ /* Reset read_eth_ring_count */ ++ read_eth_ring_count = 0; ++ ++ n = sprintf(p, " PhyAddr VirtAddr Size TotalEntries ProducerIndex ConsumerIndex NumUsed\n"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, " ------- -------- ---- ------------ ------------- ------------- -------\n"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, "%s", "EthSendRing"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, " %7lx %8p %4u %12u %13u %13u %7u \n", ++ (long) adapter->tx_obj.q.dma_mem.dma, ++ (void *)adapter->tx_obj.q.dma_mem.va, ++ (u32) (adapter->tx_obj.q.len * ++ sizeof(struct be_eth_wrb)), ++ adapter->tx_obj.q.len, adapter->tx_obj.q.head, ++ adapter->tx_obj.q.tail, ++ atomic_read(&adapter->tx_obj.q.used)); ++ ++ p += n; ++ read_eth_ring_count += n; ++ ++ /* Get Eth Send Compl Queue Details */ ++ n = sprintf(p, "%s", "EthSendCmplRing"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n", ++ (long)adapter->tx_obj.cq.dma_mem.dma, ++ (void *)adapter->tx_obj.cq.dma_mem.va, ++ (u32) (adapter->tx_obj.cq.len * ++ sizeof(struct be_eth_tx_compl)), ++ adapter->tx_obj.cq.len, "NA", ++ adapter->tx_obj.cq.tail, "NA"); ++ ++ p += n; ++ read_eth_ring_count += n; ++ /* Get Eth Rx Queue Details */ ++ n = sprintf(p, "%s", "EthRxRing"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, " %7lx %8p %4u %12u %13u %13s %7u \n", ++ (long)adapter->rx_obj.q.dma_mem.dma, ++ (void *)adapter->rx_obj.q.dma_mem.va, ++ (u32) (adapter->rx_obj.q.len * ++ sizeof(struct be_eth_rx_d)), ++ adapter->rx_obj.q.len, adapter->rx_obj.q.head,"NA", ++ atomic_read(&adapter->rx_obj.q.used)); ++ p += n; ++ read_eth_ring_count += n; ++ ++ /* Get Eth Unicast Rx Compl Queue Details */ ++ n = sprintf(p, "%s", "EthRxCmplRing"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n", ++ (long)adapter->rx_obj.cq.dma_mem.dma, ++ (void *)adapter->rx_obj.cq.dma_mem.va, ++ (u32) (adapter->rx_obj.cq.len * ++ sizeof(struct be_eth_rx_compl)), ++ adapter->rx_obj.cq.len, "NA", 
++ adapter->rx_obj.cq.tail, "NA"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ /* Get Eth Event Queue Details */ ++ n = sprintf(p, "%s", "EthTxEventRing"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, ++ " %7lx %8p %4u %12u %13s %13u %7s\n", ++ (long) adapter->tx_eq.q.dma_mem.dma, ++ (void *)adapter->tx_eq.q.dma_mem.va, ++ (u32) (adapter->tx_eq.q.len * ++ sizeof(struct be_eq_entry)), ++ adapter->tx_eq.q.len, "NA", ++ adapter->tx_eq.q.tail, "NA"); ++ ++ p += n; ++ read_eth_ring_count += n; ++ ++ /* Get Eth Event Queue Details */ ++ n = sprintf(p, "%s", "EthRxEventRing"); ++ p += n; ++ read_eth_ring_count += n; ++ ++ n = sprintf(p, ++ " %7lx %8p %4u %12u %13s %13u %7s\n", ++ (long) adapter->rx_eq.q.dma_mem.dma, ++ (void *)adapter->rx_eq.q.dma_mem.va, ++ (u32) (adapter->rx_eq.q.len * ++ sizeof(struct be_eq_entry)), ++ adapter->rx_eq.q.len, "NA", ++ adapter->rx_eq.q.tail, "NA"); ++ ++ p += n; ++ read_eth_ring_count += n; ++ } ++ ++ *start = page; ++ /* copy whatever we can */ ++ if (count < (read_eth_ring_count - off)) { ++ i = count; ++ *eof = 0; /* More bytes left */ ++ } else { ++ i = read_eth_ring_count - off; ++ *eof = 1; /* Nothing left. indicate EOF */ ++ } ++ ++ memcpy(page, read_eth_ring_buf + off, i); ++ return (i); ++} ++ ++static int proc_eth_write_ring(struct file *file, ++ const char *buffer, unsigned long count, ++ void *data) ++{ ++ return (count); /* we do not support write */ ++} ++ ++/* ++ * read the driver stats. ++ */ ++static int proc_read_drvr_stat(char *page, char **start, ++ off_t off, int count, int *eof, void *data) ++{ ++ int n, lro_cp; ++ char *p = page; ++ struct be_adapter *adapter = (struct be_adapter *) data; ++ struct net_device *netdev = adapter->netdev; ++ ++ if (off == 0) { ++ n = sprintf(p, "interface = %s\n", netdev->name); ++ p += n; ++ n = sprintf(p, "tx_reqs = %d\n", ++ drvr_stats(adapter)->be_tx_reqs); ++ p += n; ++ n = sprintf(p, "tx_stops = %d\n", ++ drvr_stats(adapter)->be_tx_stops); ++ p += n; ++ n = sprintf(p, "fwd_reqs = %d\n", ++ drvr_stats(adapter)->be_fwd_reqs); ++ p += n; ++ n = sprintf(p, "tx_wrbs = %d\n", ++ drvr_stats(adapter)->be_tx_wrbs); ++ p += n; ++ n = sprintf(p, "rx_poll = %d\n", drvr_stats(adapter)->be_rx_polls); ++ p += n; ++ n = sprintf(p, "tx_events = %d\n", ++ drvr_stats(adapter)->be_tx_events); ++ p += n; ++ n = sprintf(p, "rx_events = %d\n", ++ drvr_stats(adapter)->be_rx_events); ++ p += n; ++ n = sprintf(p, "tx_compl = %d\n", ++ drvr_stats(adapter)->be_tx_compl); ++ p += n; ++ n = sprintf(p, "rx_compl = %d\n", ++ drvr_stats(adapter)->be_rx_compl); ++ p += n; ++ n = sprintf(p, "ethrx_post_fail = %d\n", ++ drvr_stats(adapter)->be_ethrx_post_fail); ++ p += n; ++ n = sprintf(p, "802.3_dropped_frames = %d\n", ++ drvr_stats(adapter)->be_802_3_dropped_frames); ++ p += n; ++ n = sprintf(p, "802.3_malformed_frames = %d\n", ++ drvr_stats(adapter)->be_802_3_malformed_frames); ++ p += n; ++ n = sprintf(p, "eth_tx_rate = %d\n", ++ drvr_stats(adapter)->be_tx_rate); ++ p += n; ++ n = sprintf(p, "eth_rx_rate = %d\n", ++ drvr_stats(adapter)->be_rx_rate); ++ p += n; ++ ++ lro_cp = (drvr_stats(adapter)->be_lro_hgram_data[0] + ++ drvr_stats(adapter)->be_lro_hgram_data[1] + ++ drvr_stats(adapter)->be_lro_hgram_data[2] + ++ drvr_stats(adapter)->be_lro_hgram_data[3] + ++ drvr_stats(adapter)->be_lro_hgram_data[4] + ++ drvr_stats(adapter)->be_lro_hgram_data[5] + ++ drvr_stats(adapter)->be_lro_hgram_data[6] + ++ drvr_stats(adapter)->be_lro_hgram_data[7])/100; ++ lro_cp = (lro_cp == 0) ? 
1 : lro_cp; /* avoid divide by 0 */ ++ n = sprintf(p, ++ "LRO data count %% histogram (1, 2-3, 4-5,..,>=16) = " ++ "%d, %d, %d, %d - %d, %d, %d, %d\n", ++ drvr_stats(adapter)->be_lro_hgram_data[0]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_data[1]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_data[2]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_data[3]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_data[4]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_data[5]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_data[6]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_data[7]/lro_cp); ++ p += n; ++ ++ lro_cp = (drvr_stats(adapter)->be_lro_hgram_ack[0] + ++ drvr_stats(adapter)->be_lro_hgram_ack[1] + ++ drvr_stats(adapter)->be_lro_hgram_ack[2] + ++ drvr_stats(adapter)->be_lro_hgram_ack[3] + ++ drvr_stats(adapter)->be_lro_hgram_ack[4] + ++ drvr_stats(adapter)->be_lro_hgram_ack[5] + ++ drvr_stats(adapter)->be_lro_hgram_ack[6] + ++ drvr_stats(adapter)->be_lro_hgram_ack[7])/100; ++ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */ ++ n = sprintf(p, ++ "LRO ack count %% histogram (1, 2-3, 4-5,..,>=16) = " ++ "%d, %d, %d, %d - %d, %d, %d, %d\n", ++ drvr_stats(adapter)->be_lro_hgram_ack[0]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_ack[1]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_ack[2]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_ack[3]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_ack[4]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_ack[5]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_ack[6]/lro_cp, ++ drvr_stats(adapter)->be_lro_hgram_ack[7]/lro_cp); ++ p += n; ++ n = sprintf(p, "rx_eq_delay = %d \n", adapter->rx_eq.cur_eqd); ++ p += n; ++ n = sprintf(p, "rx frags per sec=%d \n", ++ drvr_stats(adapter)->be_rx_fps); ++ p += n; ++ ++ } ++ *eof = 1; ++ return (p - page); ++} ++ ++static int proc_write_drvr_stat(struct file *file, ++ const char *buffer, unsigned long count, ++ void *data) ++{ ++ struct be_adapter *adapter = (struct be_adapter *) data; ++ ++ memset(&(adapter->stats.drvr_stats), 0, ++ sizeof(adapter->stats.drvr_stats)); ++ return (count); /* we do not support write */ ++} ++ ++#if 0 ++/* the following are some of the functions that are needed here ++ * until all initializations are done by MPU. ++ */ ++ ++u32 ++CsrReadDr(void* BaseAddress, u32 Offset) ++{ ++ u32 *rp; ++ ++ rp = (u32 *) (((u8 *) BaseAddress) + Offset); ++ return (*rp); ++} ++ ++/*! ++ ++@brief ++ This routine writes to a register located within the CSR ++ space for a given function object. ++ ++@param ++ FuncObj - Pointer to the function object to read from. ++ ++@param ++ Offset - The Offset (in bytes) to write to within the function's CSR space. ++ ++@param ++ Value - The value to write to the register. ++ ++@return ++ ++@note ++ IRQL: any ++ ++*/ ++void ++CsrWriteDr(void* BaseAddress, u32 Offset, u32 Value) ++{ ++ u32 *Register; ++ ++ Register = (u32 *) (((u8 *) BaseAddress) + Offset); ++ ++ //TRACE(DL_INFO, "CsrWrite[ %X ] <= %X", Register, Value); ++ *Register = Value; ++} ++u32 be_proc_csrr_offset = -1; /* to latch the offset of next CSR Read req. */ ++ ++/* ++ * read the csr_r file. 
++
++#if 0
++/* the following are some of the functions that are needed here
++ * until all initializations are done by MPU.
++ */
++
++u32
++CsrReadDr(void *BaseAddress, u32 Offset)
++{
++	u32 *rp;
++
++	rp = (u32 *) (((u8 *) BaseAddress) + Offset);
++	return (*rp);
++}
++
++/*!
++
++@brief
++	This routine writes to a register located within the CSR
++	space for a given function object.
++
++@param
++	BaseAddress - Base of the function object's CSR space.
++
++@param
++	Offset - The Offset (in bytes) to write to within the function's CSR space.
++
++@param
++	Value - The value to write to the register.
++
++@return
++
++@note
++	IRQL: any
++
++*/
++void
++CsrWriteDr(void *BaseAddress, u32 Offset, u32 Value)
++{
++	u32 *Register;
++
++	Register = (u32 *) (((u8 *) BaseAddress) + Offset);
++
++	//TRACE(DL_INFO, "CsrWrite[ %X ] <= %X", Register, Value);
++	*Register = Value;
++}
++
++u32 be_proc_csrr_offset = -1;	/* to latch the offset of the next CSR read req. */
++
++/*
++ * read the csr_r file.  Return the 32-bit register value from
++ * CSR space at the offset latched in the global location
++ * be_proc_csrr_offset.
++ */
++static int proc_read_csr_r(char *page, char **start,
++			off_t off, int count, int *eof, void *data)
++{
++	struct be_adapter *adapter = (struct be_adapter *)data;
++	u32 val;
++	int n = 0;
++
++	if (be_proc_csrr_offset == -1)
++		return -EINVAL;
++
++	if (off == 0) {
++		/* read the CSR at offset be_proc_csrr_offset and return */
++		val = CsrReadDr(adapter->csr_va, be_proc_csrr_offset);
++		n = sprintf(page, "0x%x\n", val);
++	}
++	*eof = 1;
++	return n;
++}
++
++/*
++ * save the written value in be_proc_csrr_offset for the next
++ * read from the file
++ */
++static int proc_write_csr_r(struct file *file,
++			const char *buffer, unsigned long count, void *data)
++{
++	char buf[64];
++	u32 n;
++
++	if (count > sizeof(buf) - 1)	/* leave room for the '\0' below */
++		return -EINVAL;
++	if (copy_from_user(buf, buffer, count))
++		return -EFAULT;
++	buf[count] = '\0';
++
++	n = simple_strtoul(buf, NULL, 16);
++	if (n < 0x50000)
++		be_proc_csrr_offset = n;
++	return (count);
++}
++
++/*
++ * return the latched offset for reading the csr_r file.
++ */
++static int proc_read_csr_w(char *page, char **start,
++			off_t off, int count, int *eof, void *data)
++{
++	*eof = 1;
++	return sprintf(page, "0x%x\n", be_proc_csrr_offset);
++}
++
++/*
++ * the incoming string is of the form "<offset> <value>",
++ * where offset is the offset of the register to be written
++ * and value is the value to be written.
++ */
++static int proc_write_csr_w(struct file *file,
++			const char *buffer, unsigned long count,
++			void *data)
++{
++	char buf[64];
++	char *p;
++	u32 n, val;
++	struct be_adapter *adapter = (struct be_adapter *)data;
++
++	if (count > sizeof(buf) - 1)	/* leave room for the '\0' below */
++		return -EINVAL;
++	if (copy_from_user(buf, buffer, count))
++		return -EFAULT;
++	buf[count] = '\0';
++
++	n = simple_strtoul(buf, &p, 16);
++	if (n > 0x50000)
++		return -EINVAL;
++
++	/* now get the actual value to be written */
++	while (*p == ' ' || *p == '\t')
++		p++;
++	val = simple_strtoul(p, NULL, 16);
++	CsrWriteDr(adapter->csr_va, n, val);
++	return (count);
++}
++#endif
++
++void be_init_procfs(struct be_adapter *adapter, int adapt_num)
++{
++	static struct proc_dir_entry *pde;
++
++	if (adapt_num > MAX_BE_DEVICES - 1)
++		return;
++
++	/* create directory */
++	be_proc_dir[adapt_num] =
++		proc_mkdir(be_adpt_name[adapt_num], NULL);
++	if (be_proc_dir[adapt_num]) {
++		(be_proc_dir[adapt_num])->owner = THIS_MODULE;
++	}
++
++	pde = create_proc_entry(BE_ETH_RING_FILE, BE_PROC_MODE,
++		be_proc_dir[adapt_num]);
++	if (pde) {
++		pde->read_proc = proc_eth_read_ring;
++		pde->write_proc = proc_eth_write_ring;
++		pde->data = adapter;
++		pde->owner = THIS_MODULE;
++	}
++
++	pde = create_proc_entry(BE_DRVR_STAT_FILE, BE_PROC_MODE,
++		be_proc_dir[adapt_num]);
++	if (pde) {
++		pde->read_proc = proc_read_drvr_stat;
++		pde->write_proc = proc_write_drvr_stat;
++		pde->data = adapter;
++		pde->owner = THIS_MODULE;
++	}
++
++#if 0
++	if ((pde = create_proc_entry(BE_CSR_R_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
++		pde->read_proc = proc_read_csr_r;
++		pde->write_proc = proc_write_csr_r;
++		pde->data = adapter;
++		pde->owner = THIS_MODULE;
++	}
++
++	if ((pde = create_proc_entry(BE_CSR_W_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
++		pde->read_proc = proc_read_csr_w;
++		pde->write_proc = proc_write_csr_w;
++		pde->data = adapter;
++		pde->owner = THIS_MODULE;
++	}
++#endif
++}
++
++void be_cleanup_procfs(struct be_adapter *adapter, int adapt_num)
++{
++	if (adapt_num > MAX_BE_DEVICES - 1)
++		return;
++	remove_proc_entry(BE_ETH_RING_FILE, be_proc_dir[adapt_num]);
++	remove_proc_entry(BE_DRVR_STAT_FILE, be_proc_dir[adapt_num]);
++#if 0	/* the CSR entries are only ever created in the #if 0 block above */
++	remove_proc_entry(BE_CSR_R_FILE, be_proc_dir[adapt_num]);
++	remove_proc_entry(BE_CSR_W_FILE, be_proc_dir[adapt_num]);
++#endif
++	remove_proc_entry(be_adpt_name[adapt_num], NULL);
++}
+diff --git a/drivers/net/benet/version.h b/drivers/net/benet/version.h
+new file mode 100644
+index 0000000..c7ed692
+--- /dev/null
++++ b/drivers/net/benet/version.h
+@@ -0,0 +1,51 @@
++#define STR_BE_BRANCH "0"
++#define STR_BE_BUILD "479"
++#define STR_BE_DOT "0"
++#define STR_BE_MINOR "0"
++#define STR_BE_MAJOR "4"
++
++#define BE_BRANCH 0
++#define BE_BUILD 479
++#define BE_DOT 0
++#define BE_MINOR 0
++#define BE_MAJOR 4
++
++#define MGMT_BRANCH 0
++#define MGMT_BUILDNUM 479
++#define MGMT_MINOR 0
++#define MGMT_MAJOR 4
++
++#define BE_REDBOOT_VERSION "2.0.5.0"
++
++//start-auto
++#define BUILD_MONTH "12"
++#define BUILD_MONTH_NAME "December"
++#define BUILD_DAY "6"
++#define BUILD_YEAR "2011"
++#define BUILD_24HOUR "21"
++#define BUILD_12HOUR "9"
++#define BUILD_AM_PM "PM"
++#define BUILD_MIN "48"
++#define BUILD_SEC "05"
++#define BUILD_MONTH_NUMBER 12
++#define BUILD_DAY_NUMBER 6
++#define BUILD_YEAR_NUMBER 2011
++#define BUILD_24HOUR_NUMBER 21
++#define BUILD_12HOUR_NUMBER 9
++#define BUILD_MIN_NUMBER 48
++#define BUILD_SEC_NUMBER 5
++#undef MAJOR_BUILD
++#undef MINOR_BUILD
++#undef DOT_BUILD
++#define NUMBERED_BUILD
++#undef BRANCH_BUILD
++//end-auto
++
++#define ELX_FCOE_XROM_BIOS_VER "7.03a1"
++#define ELX_FCoE_X86_VER "4.02a1"
++#define ELX_FCoE_EFI_VER "5.01a1"
++#define ELX_FCoE_FCODE_VER "4.01a0"
++#define ELX_PXE_BIOS_VER "3.00a5"
++#define ELX_UEFI_NIC_VER "2.10A10"
++#define ELX_UEFI_FCODE_VER "1.10A0"
++#define ELX_ISCSI_BIOS_VER "1.00A8"
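One pattern worth noting before the hunks that follow: the patch repeatedly inserts pax_track_stack() as the first statement of functions that keep large buffers on the stack (bnx2_test_nvram, get_vpd_params, sixpack_receive_buf and others below). As best I can tell this is the instrumentation hook for PaX's stack-depth tracking (the STACKLEAK feature); the real helper is defined elsewhere in this patch. The sketch below shows only the call pattern, with a no-op stand-in macro so the fragment compiles on its own.

	#include <string.h>

	/* Stand-in only: the real pax_track_stack() is provided by this
	 * patch when CONFIG_PAX_MEMORY_STACKLEAK is enabled. */
	#define pax_track_stack() do { } while (0)

	static int example_selftest(void)
	{
		unsigned char data[1024];	/* large automatic buffer */

		pax_track_stack();	/* register the deep frame up front */

		memset(data, 0, sizeof(data));
		/* ... fill and verify data ... */
		return 0;
	}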
+diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c +index 4874b2b..67f8526 100644 +--- a/drivers/net/bnx2.c ++++ b/drivers/net/bnx2.c +@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp) + int rc = 0; + u32 magic, csum; + ++ pax_track_stack(); ++ + if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) + goto test_nvram_done; + +diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h +index fd3eb07..8a6978d 100644 +--- a/drivers/net/cxgb3/l2t.h ++++ b/drivers/net/cxgb3/l2t.h +@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev, + */ + struct l2t_skb_cb { + arp_failure_handler_func arp_failure_handler; +-}; ++} __no_const; + + #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c +index 032cfe0..411af379 100644 +--- a/drivers/net/cxgb3/t3_hw.c ++++ b/drivers/net/cxgb3/t3_hw.c +@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) + int i, addr, ret; + struct t3_vpd vpd; + ++ pax_track_stack(); ++ + /* + * Card information is normally at VPD_BASE but some early cards had + * it at 0. +diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c +index 7fa7a90..fef924d 100644 +--- a/drivers/net/dl2k.c ++++ b/drivers/net/dl2k.c +@@ -1279,55 +1279,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) + { + int phy_addr; + struct netdev_private *np = netdev_priv(dev); +- struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru; +- +- struct netdev_desc *desc; +- int i; ++ struct mii_ioctl_data *miidata = if_mii(rq); + + phy_addr = np->phy_addr; + switch (cmd) { +- case SIOCDEVPRIVATE: ++ case SIOCGMIIPHY: ++ miidata->phy_id = phy_addr; + break; +- +- case SIOCDEVPRIVATE + 1: +- miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num); ++ case SIOCGMIIREG: ++ miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num); + break; +- case SIOCDEVPRIVATE + 2: +- mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value); ++ case SIOCSMIIREG: ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in); + break; +- case SIOCDEVPRIVATE + 3: +- break; +- case SIOCDEVPRIVATE + 4: +- break; +- case SIOCDEVPRIVATE + 5: +- netif_stop_queue (dev); +- break; +- case SIOCDEVPRIVATE + 6: +- netif_wake_queue (dev); +- break; +- case SIOCDEVPRIVATE + 7: +- printk +- ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n", +- netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx, +- np->old_rx); +- break; +- case SIOCDEVPRIVATE + 8: +- printk("TX ring:\n"); +- for (i = 0; i < TX_RING_SIZE; i++) { +- desc = &np->tx_ring[i]; +- printk +- ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x", +- i, +- (u32) (np->tx_ring_dma + i * sizeof (*desc)), +- (u32)le64_to_cpu(desc->next_desc), +- (u32)le64_to_cpu(desc->status), +- (u32)(le64_to_cpu(desc->fraginfo) >> 32), +- (u32)le64_to_cpu(desc->fraginfo)); +- printk ("\n"); +- } +- printk ("\n"); +- break; +- + default: + return -EOPNOTSUPP; + } +diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h +index 266ec87..466b2f5 100644 +--- a/drivers/net/dl2k.h ++++ b/drivers/net/dl2k.h +@@ -471,13 +471,6 @@ struct ioctl_data { + char *data; + }; + +-struct mii_data { +- __u16 reserved; +- __u16 reg_num; +- __u16 in_value; +- __u16 out_value; +-}; +- + /* The Rx and Tx buffer descriptors. 
*/ + struct netdev_desc { + __le64 next_desc; +@@ -551,4 +544,7 @@ MODULE_DEVICE_TABLE (pci, rio_pci_tbl); + #define DEFAULT_RXT 750 + #define DEFAULT_TXC 1 + #define MAX_TXC 8 ++ ++#include <linux/mii.h> ++ + #endif /* __DL2K_H__ */ +diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c +index d1e0563..b9e129c 100644 +--- a/drivers/net/e1000e/82571.c ++++ b/drivers/net/e1000e/82571.c +@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; +- struct e1000_mac_operations *func = &mac->ops; ++ e1000_mac_operations_no_const *func = &mac->ops; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; +@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) + temp = er32(ICRXDMTC); + } + +-static struct e1000_mac_operations e82571_mac_ops = { ++static const struct e1000_mac_operations e82571_mac_ops = { + /* .check_mng_mode: mac type dependent */ + /* .check_for_link: media type dependent */ + .id_led_init = e1000e_id_led_init, +@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = { + .setup_led = e1000e_setup_led_generic, + }; + +-static struct e1000_phy_operations e82_phy_ops_igp = { ++static const struct e1000_phy_operations e82_phy_ops_igp = { + .acquire_phy = e1000_get_hw_semaphore_82571, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = NULL, +@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = { + .cfg_on_link_up = NULL, + }; + +-static struct e1000_phy_operations e82_phy_ops_m88 = { ++static const struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire_phy = e1000_get_hw_semaphore_82571, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = e1000e_phy_sw_reset, +@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = { + .cfg_on_link_up = NULL, + }; + +-static struct e1000_phy_operations e82_phy_ops_bm = { ++static const struct e1000_phy_operations e82_phy_ops_bm = { + .acquire_phy = e1000_get_hw_semaphore_82571, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = e1000e_phy_sw_reset, +@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = { + .cfg_on_link_up = NULL, + }; + +-static struct e1000_nvm_operations e82571_nvm_ops = { ++static const struct e1000_nvm_operations e82571_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_82571, + .read_nvm = e1000e_read_nvm_eerd, + .release_nvm = e1000_release_nvm_82571, +diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h +index 47db9bd..fa58ccd 100644 +--- a/drivers/net/e1000e/e1000.h ++++ b/drivers/net/e1000e/e1000.h +@@ -375,9 +375,9 @@ struct e1000_info { + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); +- struct e1000_mac_operations *mac_ops; +- struct e1000_phy_operations *phy_ops; +- struct e1000_nvm_operations *nvm_ops; ++ const struct e1000_mac_operations *mac_ops; ++ const struct e1000_phy_operations *phy_ops; ++ const struct e1000_nvm_operations *nvm_ops; + }; + + /* hardware capability, feature, and workaround flags */ +diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c +index ae5d736..e9a93a1 100644 +--- a/drivers/net/e1000e/es2lan.c ++++ b/drivers/net/e1000e/es2lan.c +@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; +- struct 
e1000_mac_operations *func = &mac->ops; ++ e1000_mac_operations_no_const *func = &mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { +@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) + temp = er32(ICRXDMTC); + } + +-static struct e1000_mac_operations es2_mac_ops = { ++static const struct e1000_mac_operations es2_mac_ops = { + .id_led_init = e1000e_id_led_init, + .check_mng_mode = e1000e_check_mng_mode_generic, + /* check_for_link dependent on media type */ +@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = { + .setup_led = e1000e_setup_led_generic, + }; + +-static struct e1000_phy_operations es2_phy_ops = { ++static const struct e1000_phy_operations es2_phy_ops = { + .acquire_phy = e1000_acquire_phy_80003es2lan, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = e1000e_phy_sw_reset, +@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = { + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, + }; + +-static struct e1000_nvm_operations es2_nvm_ops = { ++static const struct e1000_nvm_operations es2_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_80003es2lan, + .read_nvm = e1000e_read_nvm_eerd, + .release_nvm = e1000_release_nvm_80003es2lan, +diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h +index 11f3b7c..6381887 100644 +--- a/drivers/net/e1000e/hw.h ++++ b/drivers/net/e1000e/hw.h +@@ -753,6 +753,7 @@ struct e1000_mac_operations { + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + }; ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; + + /* Function pointers for the PHY. */ + struct e1000_phy_operations { +@@ -774,6 +775,7 @@ struct e1000_phy_operations { + s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*cfg_on_link_up)(struct e1000_hw *); + }; ++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const; + + /* Function pointers for the NVM. 
*/ + struct e1000_nvm_operations { +@@ -785,9 +787,10 @@ struct e1000_nvm_operations { + s32 (*validate_nvm)(struct e1000_hw *); + s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); + }; ++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const; + + struct e1000_mac_info { +- struct e1000_mac_operations ops; ++ e1000_mac_operations_no_const ops; + + u8 addr[6]; + u8 perm_addr[6]; +@@ -823,7 +826,7 @@ struct e1000_mac_info { + }; + + struct e1000_phy_info { +- struct e1000_phy_operations ops; ++ e1000_phy_operations_no_const ops; + + enum e1000_phy_type type; + +@@ -857,7 +860,7 @@ struct e1000_phy_info { + }; + + struct e1000_nvm_info { +- struct e1000_nvm_operations ops; ++ e1000_nvm_operations_no_const ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; +diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c +index de39f9a..e28d3e0 100644 +--- a/drivers/net/e1000e/ich8lan.c ++++ b/drivers/net/e1000e/ich8lan.c +@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) + } + } + +-static struct e1000_mac_operations ich8_mac_ops = { ++static const struct e1000_mac_operations ich8_mac_ops = { + .id_led_init = e1000e_id_led_init, + .check_mng_mode = e1000_check_mng_mode_ich8lan, + .check_for_link = e1000_check_for_copper_link_ich8lan, +@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = { + /* id_led_init dependent on mac type */ + }; + +-static struct e1000_phy_operations ich8_phy_ops = { ++static const struct e1000_phy_operations ich8_phy_ops = { + .acquire_phy = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit_phy = NULL, +@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = { + .write_phy_reg = e1000e_write_phy_reg_igp, + }; + +-static struct e1000_nvm_operations ich8_nvm_ops = { ++static const struct e1000_nvm_operations ich8_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_ich8lan, + .read_nvm = e1000_read_nvm_ich8lan, + .release_nvm = e1000_release_nvm_ich8lan, +diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c +index 18d5fbb..542d96d 100644 +--- a/drivers/net/fealnx.c ++++ b/drivers/net/fealnx.c +@@ -151,7 +151,7 @@ struct chip_info { + int flags; + }; + +-static const struct chip_info skel_netdrv_tbl[] __devinitdata = { ++static const struct chip_info skel_netdrv_tbl[] __devinitconst = { + { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, + { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, + { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c +index 0e5b54b..b503f82 100644 +--- a/drivers/net/hamradio/6pack.c ++++ b/drivers/net/hamradio/6pack.c +@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty, + unsigned char buf[512]; + int count1; + ++ pax_track_stack(); ++ + if (!count) + return; + +diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c +index 5862282..7cce8cb 100644 +--- a/drivers/net/ibmveth.c ++++ b/drivers/net/ibmveth.c +@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = { + NULL, + }; + +-static struct sysfs_ops veth_pool_ops = { ++static const struct sysfs_ops veth_pool_ops = { + .show = veth_pool_show, + .store = veth_pool_store, + }; +diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c +index d617f2d..57b5309 100644 +--- a/drivers/net/igb/e1000_82575.c ++++ b/drivers/net/igb/e1000_82575.c +@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct 
e1000_hw *hw, bool enable) + wr32(E1000_VT_CTL, vt_ctl); + } + +-static struct e1000_mac_operations e1000_mac_ops_82575 = { ++static const struct e1000_mac_operations e1000_mac_ops_82575 = { + .reset_hw = igb_reset_hw_82575, + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, +@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = { + .get_speed_and_duplex = igb_get_speed_and_duplex_copper, + }; + +-static struct e1000_phy_operations e1000_phy_ops_82575 = { ++static const struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + }; + +-static struct e1000_nvm_operations e1000_nvm_ops_82575 = { ++static const struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, +diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h +index 72081df..d855cf5 100644 +--- a/drivers/net/igb/e1000_hw.h ++++ b/drivers/net/igb/e1000_hw.h +@@ -288,6 +288,7 @@ struct e1000_mac_operations { + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + }; ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; + + struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); +@@ -303,6 +304,7 @@ struct e1000_phy_operations { + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + }; ++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const; + + struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); +@@ -310,6 +312,7 @@ struct e1000_nvm_operations { + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + }; ++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const; + + struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); +@@ -321,7 +324,7 @@ struct e1000_info { + extern const struct e1000_info e1000_82575_info; + + struct e1000_mac_info { +- struct e1000_mac_operations ops; ++ e1000_mac_operations_no_const ops; + + u8 addr[6]; + u8 perm_addr[6]; +@@ -365,7 +368,7 @@ struct e1000_mac_info { + }; + + struct e1000_phy_info { +- struct e1000_phy_operations ops; ++ e1000_phy_operations_no_const ops; + + enum e1000_phy_type type; + +@@ -400,7 +403,7 @@ struct e1000_phy_info { + }; + + struct e1000_nvm_info { +- struct e1000_nvm_operations ops; ++ e1000_nvm_operations_no_const ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; +@@ -446,6 +449,7 @@ struct e1000_mbx_operations { + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); + }; ++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const; + + struct e1000_mbx_stats { + u32 msgs_tx; +@@ -457,7 +461,7 @@ struct e1000_mbx_stats { + }; + + struct e1000_mbx_info { +- struct e1000_mbx_operations ops; ++ e1000_mbx_operations_no_const ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; +diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h +index 1e8ce37..549c453 100644 +--- a/drivers/net/igbvf/vf.h ++++ b/drivers/net/igbvf/vf.h +@@ -187,9 +187,10 @@ struct e1000_mac_operations { + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*set_vfta)(struct e1000_hw *, u16, bool); + }; ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; + + struct e1000_mac_info { +- 
struct e1000_mac_operations ops; ++ e1000_mac_operations_no_const ops; + u8 addr[6]; + u8 perm_addr[6]; + +@@ -211,6 +212,7 @@ struct e1000_mbx_operations { + s32 (*check_for_ack)(struct e1000_hw *); + s32 (*check_for_rst)(struct e1000_hw *); + }; ++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const; + + struct e1000_mbx_stats { + u32 msgs_tx; +@@ -222,7 +224,7 @@ struct e1000_mbx_stats { + }; + + struct e1000_mbx_info { +- struct e1000_mbx_operations ops; ++ e1000_mbx_operations_no_const ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; +diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c +index aa7286b..a61394f 100644 +--- a/drivers/net/iseries_veth.c ++++ b/drivers/net/iseries_veth.c +@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = { + NULL + }; + +-static struct sysfs_ops veth_cnx_sysfs_ops = { ++static const struct sysfs_ops veth_cnx_sysfs_ops = { + .show = veth_cnx_attribute_show + }; + +@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = { + NULL + }; + +-static struct sysfs_ops veth_port_sysfs_ops = { ++static const struct sysfs_ops veth_port_sysfs_ops = { + .show = veth_port_attribute_show + }; + +diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c +index 8aa44dc..fa1e797 100644 +--- a/drivers/net/ixgb/ixgb_main.c ++++ b/drivers/net/ixgb/ixgb_main.c +@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev) + u32 rctl; + int i; + ++ pax_track_stack(); ++ + /* Check for Promiscuous and All Multicast modes */ + + rctl = IXGB_READ_REG(hw, RCTL); +diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c +index af35e1d..8781785 100644 +--- a/drivers/net/ixgb/ixgb_param.c ++++ b/drivers/net/ixgb/ixgb_param.c +@@ -260,6 +260,9 @@ void __devinit + ixgb_check_options(struct ixgb_adapter *adapter) + { + int bd = adapter->bd_number; ++ ++ pax_track_stack(); ++ + if (bd >= IXGB_MAX_NIC) { + printk(KERN_NOTICE + "Warning: no configuration for board #%i\n", bd); +diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h +index b17aa73..ed74540 100644 +--- a/drivers/net/ixgbe/ixgbe_type.h ++++ b/drivers/net/ixgbe/ixgbe_type.h +@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations { + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + }; ++typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const; + + struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); +@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations { + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *, s32); + }; ++typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const; + + struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); +@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations { + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + }; ++typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const; + + struct ixgbe_eeprom_info { +- struct ixgbe_eeprom_operations ops; ++ ixgbe_eeprom_operations_no_const ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; +@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info { + }; + + struct ixgbe_mac_info { +- struct ixgbe_mac_operations ops; ++ ixgbe_mac_operations_no_const ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; +@@ -2423,7 +2426,7 @@ struct 
ixgbe_mac_info { + }; + + struct ixgbe_phy_info { +- struct ixgbe_phy_operations ops; ++ ixgbe_phy_operations_no_const ops; + struct mdio_if_info mdio; + enum ixgbe_phy_type type; + u32 id; +diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c +index 291a505..2543756 100644 +--- a/drivers/net/mlx4/main.c ++++ b/drivers/net/mlx4/main.c +@@ -38,6 +38,7 @@ + #include <linux/errno.h> + #include <linux/pci.h> + #include <linux/dma-mapping.h> ++#include <linux/sched.h> + + #include <linux/mlx4/device.h> + #include <linux/mlx4/doorbell.h> +@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) + u64 icm_size; + int err; + ++ pax_track_stack(); ++ + err = mlx4_QUERY_FW(dev); + if (err) { + if (err == -EACCES) +diff --git a/drivers/net/niu.c b/drivers/net/niu.c +index 2dce134..fa5ce75 100644 +--- a/drivers/net/niu.c ++++ b/drivers/net/niu.c +@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) + int i, num_irqs, err; + u8 first_ldg; + ++ pax_track_stack(); ++ + first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; + for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) + ldg_num_map[i] = first_ldg + i; +diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c +index c1b3f09..97cd8c4 100644 +--- a/drivers/net/pcnet32.c ++++ b/drivers/net/pcnet32.c +@@ -79,7 +79,7 @@ static int cards_found; + /* + * VLB I/O addresses + */ +-static unsigned int pcnet32_portlist[] __initdata = ++static unsigned int pcnet32_portlist[] __devinitdata = + { 0x300, 0x320, 0x340, 0x360, 0 }; + + static int pcnet32_debug = 0; +@@ -267,7 +267,7 @@ struct pcnet32_private { + struct sk_buff **rx_skbuff; + dma_addr_t *tx_dma_addr; + dma_addr_t *rx_dma_addr; +- struct pcnet32_access a; ++ struct pcnet32_access *a; + spinlock_t lock; /* Guard lock */ + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int rx_ring_size; /* current rx ring size */ +@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev) + u16 val; + + netif_wake_queue(dev); +- val = lp->a.read_csr(ioaddr, CSR3); ++ val = lp->a->read_csr(ioaddr, CSR3); + val &= 0x00ff; +- lp->a.write_csr(ioaddr, CSR3, val); ++ lp->a->write_csr(ioaddr, CSR3, val); + napi_enable(&lp->napi); + } + +@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev) + r = mii_link_ok(&lp->mii_if); + } else if (lp->chip_version >= PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ +- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); ++ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ + r = 1; + } +@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, + pcnet32_netif_stop(dev); + + spin_lock_irqsave(&lp->lock, flags); +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); + +@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev, + static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) + { + struct pcnet32_private *lp = netdev_priv(dev); +- struct pcnet32_access *a = &lp->a; /* access to registers */ ++ struct pcnet32_access *a = lp->a; /* access to registers */ + ulong ioaddr = dev->base_addr; /* card base I/O address */ + struct sk_buff *skb; /* sk buff */ + int x, i; /* counters */ +@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) + pcnet32_netif_stop(dev); + 
+ spin_lock_irqsave(&lp->lock, flags); +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); + + /* Reset the PCNET32 */ +- lp->a.reset(ioaddr); +- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ ++ lp->a->reset(ioaddr); ++ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + + /* switch pcnet32 to 32bit mode */ +- lp->a.write_bcr(ioaddr, 20, 2); ++ lp->a->write_bcr(ioaddr, 20, 2); + + /* purge & init rings but don't actually restart */ + pcnet32_restart(dev, 0x0000); + +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + + /* Initialize Transmit buffers. */ + size = data_len + 15; +@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) + + /* set int loopback in CSR15 */ + x = a->read_csr(ioaddr, CSR15) & 0xfffc; +- lp->a.write_csr(ioaddr, CSR15, x | 0x0044); ++ lp->a->write_csr(ioaddr, CSR15, x | 0x0044); + + teststatus = cpu_to_le16(0x8000); +- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ ++ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ + + /* Check status of descriptors */ + for (x = 0; x < numbuffs; x++) { +@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) + } + } + +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + wmb(); + if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { + printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); +@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) + pcnet32_restart(dev, CSR0_NORMAL); + } else { + pcnet32_purge_rx_ring(dev); +- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ ++ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + } + spin_unlock_irqrestore(&lp->lock, flags); + +@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) + static void pcnet32_led_blink_callback(struct net_device *dev) + { + struct pcnet32_private *lp = netdev_priv(dev); +- struct pcnet32_access *a = &lp->a; ++ struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + int i; +@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev) + static int pcnet32_phys_id(struct net_device *dev, u32 data) + { + struct pcnet32_private *lp = netdev_priv(dev); +- struct pcnet32_access *a = &lp->a; ++ struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + int i, regs[4]; +@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, + { + int csr5; + struct pcnet32_private *lp = netdev_priv(dev); +- struct pcnet32_access *a = &lp->a; ++ struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + +@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) + spin_lock_irqsave(&lp->lock, flags); + if (pcnet32_tx(dev)) { + /* reset the chip to clear the error condition, then restart */ +- lp->a.reset(ioaddr); +- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ ++ lp->a->reset(ioaddr); ++ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + pcnet32_restart(dev, CSR0_START); + netif_wake_queue(dev); + } +@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct 
napi_struct *napi, int budget) + __napi_complete(napi); + + /* clear interrupt masks */ +- val = lp->a.read_csr(ioaddr, CSR3); ++ val = lp->a->read_csr(ioaddr, CSR3); + val &= 0x00ff; +- lp->a.write_csr(ioaddr, CSR3, val); ++ lp->a->write_csr(ioaddr, CSR3, val); + + /* Set interrupt enable. */ +- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); + + spin_unlock_irqrestore(&lp->lock, flags); + } +@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + int i, csr0; + u16 *buff = ptr; + struct pcnet32_private *lp = netdev_priv(dev); +- struct pcnet32_access *a = &lp->a; ++ struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + +@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + for (j = 0; j < PCNET32_MAX_PHYS; j++) { + if (lp->phymask & (1 << j)) { + for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { +- lp->a.write_bcr(ioaddr, 33, ++ lp->a->write_bcr(ioaddr, 33, + (j << 5) | i); +- *buff++ = lp->a.read_bcr(ioaddr, 34); ++ *buff++ = lp->a->read_bcr(ioaddr, 34); + } + } + } +@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) + ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) + lp->options |= PCNET32_PORT_FD; + +- lp->a = *a; ++ lp->a = a; + + /* prior to register_netdev, dev->name is not yet correct */ + if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { +@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) + if (lp->mii) { + /* lp->phycount and lp->phymask are set to 0 by memset above */ + +- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; ++ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f; + /* scan for PHYs */ + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + unsigned short id1, id2; +@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) + "Found PHY %04x:%04x at address %d.\n", + id1, id2, i); + } +- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); ++ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); + if (lp->phycount > 1) { + lp->options |= PCNET32_PORT_MII; + } +@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev) + } + + /* Reset the PCNET32 */ +- lp->a.reset(ioaddr); ++ lp->a->reset(ioaddr); + + /* switch pcnet32 to 32bit mode */ +- lp->a.write_bcr(ioaddr, 20, 2); ++ lp->a->write_bcr(ioaddr, 20, 2); + + if (netif_msg_ifup(lp)) + printk(KERN_DEBUG +@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev) + (u32) (lp->init_dma_addr)); + + /* set/reset autoselect bit */ +- val = lp->a.read_bcr(ioaddr, 2) & ~2; ++ val = lp->a->read_bcr(ioaddr, 2) & ~2; + if (lp->options & PCNET32_PORT_ASEL) + val |= 2; +- lp->a.write_bcr(ioaddr, 2, val); ++ lp->a->write_bcr(ioaddr, 2, val); + + /* handle full duplex setting */ + if (lp->mii_if.full_duplex) { +- val = lp->a.read_bcr(ioaddr, 9) & ~3; ++ val = lp->a->read_bcr(ioaddr, 9) & ~3; + if (lp->options & PCNET32_PORT_FD) { + val |= 1; + if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) +@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev) + if (lp->chip_version == 0x2627) + val |= 3; + } +- lp->a.write_bcr(ioaddr, 9, val); ++ lp->a->write_bcr(ioaddr, 9, val); + } + + /* set/reset GPSI bit in test register */ +- val = lp->a.read_csr(ioaddr, 124) & ~0x10; ++ val = lp->a->read_csr(ioaddr, 124) & ~0x10; + if ((lp->options & PCNET32_PORT_PORTSEL) == 
PCNET32_PORT_GPSI) + val |= 0x10; +- lp->a.write_csr(ioaddr, 124, val); ++ lp->a->write_csr(ioaddr, 124, val); + + /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ + if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && +@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev) + * duplex, and/or enable auto negotiation, and clear DANAS + */ + if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { +- lp->a.write_bcr(ioaddr, 32, +- lp->a.read_bcr(ioaddr, 32) | 0x0080); ++ lp->a->write_bcr(ioaddr, 32, ++ lp->a->read_bcr(ioaddr, 32) | 0x0080); + /* disable Auto Negotiation, set 10Mpbs, HD */ +- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; ++ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8; + if (lp->options & PCNET32_PORT_FD) + val |= 0x10; + if (lp->options & PCNET32_PORT_100) + val |= 0x08; +- lp->a.write_bcr(ioaddr, 32, val); ++ lp->a->write_bcr(ioaddr, 32, val); + } else { + if (lp->options & PCNET32_PORT_ASEL) { +- lp->a.write_bcr(ioaddr, 32, +- lp->a.read_bcr(ioaddr, ++ lp->a->write_bcr(ioaddr, 32, ++ lp->a->read_bcr(ioaddr, + 32) | 0x0080); + /* enable auto negotiate, setup, disable fd */ +- val = lp->a.read_bcr(ioaddr, 32) & ~0x98; ++ val = lp->a->read_bcr(ioaddr, 32) & ~0x98; + val |= 0x20; +- lp->a.write_bcr(ioaddr, 32, val); ++ lp->a->write_bcr(ioaddr, 32, val); + } + } + } else { +@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev) + * There is really no good other way to handle multiple PHYs + * other than turning off all automatics + */ +- val = lp->a.read_bcr(ioaddr, 2); +- lp->a.write_bcr(ioaddr, 2, val & ~2); +- val = lp->a.read_bcr(ioaddr, 32); +- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ ++ val = lp->a->read_bcr(ioaddr, 2); ++ lp->a->write_bcr(ioaddr, 2, val & ~2); ++ val = lp->a->read_bcr(ioaddr, 32); ++ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ + + if (!(lp->options & PCNET32_PORT_ASEL)) { + /* setup ecmd */ +@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev) + ecmd.speed = + lp-> + options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10; +- bcr9 = lp->a.read_bcr(ioaddr, 9); ++ bcr9 = lp->a->read_bcr(ioaddr, 9); + + if (lp->options & PCNET32_PORT_FD) { + ecmd.duplex = DUPLEX_FULL; +@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev) + ecmd.duplex = DUPLEX_HALF; + bcr9 |= ~(1 << 0); + } +- lp->a.write_bcr(ioaddr, 9, bcr9); ++ lp->a->write_bcr(ioaddr, 9, bcr9); + } + + for (i = 0; i < PCNET32_MAX_PHYS; i++) { +@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev) + + #ifdef DO_DXSUFLO + if (lp->dxsuflo) { /* Disable transmit stop on underflow */ +- val = lp->a.read_csr(ioaddr, CSR3); ++ val = lp->a->read_csr(ioaddr, CSR3); + val |= 0x40; +- lp->a.write_csr(ioaddr, CSR3, val); ++ lp->a->write_csr(ioaddr, CSR3, val); + } + #endif + +@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev) + napi_enable(&lp->napi); + + /* Re-initialize the PCNET32, and start it when done. 
*/ +- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); +- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); ++ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); ++ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); + +- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ +- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); ++ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); + + netif_start_queue(dev); + +@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev) + + i = 0; + while (i++ < 100) +- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) ++ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) + break; + /* + * We used to clear the InitDone bit, 0x0100, here but Mark Stockton + * reports that doing so triggers a bug in the '974. + */ +- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL); ++ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL); + + if (netif_msg_ifup(lp)) + printk(KERN_DEBUG + "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", + dev->name, i, + (u32) (lp->init_dma_addr), +- lp->a.read_csr(ioaddr, CSR0)); ++ lp->a->read_csr(ioaddr, CSR0)); + + spin_unlock_irqrestore(&lp->lock, flags); + +@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev) + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ +- lp->a.write_bcr(ioaddr, 20, 4); ++ lp->a->write_bcr(ioaddr, 20, 4); + + err_free_irq: + spin_unlock_irqrestore(&lp->lock, flags); +@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) + + /* wait for stop */ + for (i = 0; i < 100; i++) +- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP) ++ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP) + break; + + if (i >= 100 && netif_msg_drv(lp)) +@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) + return; + + /* ReInit Ring */ +- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); + i = 0; + while (i++ < 1000) +- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) ++ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) + break; + +- lp->a.write_csr(ioaddr, CSR0, csr0_bits); ++ lp->a->write_csr(ioaddr, CSR0, csr0_bits); + } + + static void pcnet32_tx_timeout(struct net_device *dev) +@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev) + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR + "%s: transmit timed out, status %4.4x, resetting.\n", +- dev->name, lp->a.read_csr(ioaddr, CSR0)); +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); ++ dev->name, lp->a->read_csr(ioaddr, CSR0)); ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + dev->stats.tx_errors++; + if (netif_msg_tx_err(lp)) { + int i; +@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, + if (netif_msg_tx_queued(lp)) { + printk(KERN_DEBUG + "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", +- dev->name, lp->a.read_csr(ioaddr, CSR0)); ++ dev->name, lp->a->read_csr(ioaddr, CSR0)); + } + + /* Default status -- will not enable Successful-TxDone +@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, + dev->stats.tx_bytes += skb->len; + + /* Trigger an immediate send poll. 
*/ +- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); + + dev->trans_start = jiffies; + +@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id) + + spin_lock(&lp->lock); + +- csr0 = lp->a.read_csr(ioaddr, CSR0); ++ csr0 = lp->a->read_csr(ioaddr, CSR0); + while ((csr0 & 0x8f00) && --boguscnt >= 0) { + if (csr0 == 0xffff) { + break; /* PCMCIA remove happened */ + } + /* Acknowledge all of the current interrupt sources ASAP. */ +- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f); ++ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f); + + if (netif_msg_intr(lp)) + printk(KERN_DEBUG + "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", +- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0)); ++ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0)); + + /* Log misc errors. */ + if (csr0 & 0x4000) +@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id) + if (napi_schedule_prep(&lp->napi)) { + u16 val; + /* set interrupt masks */ +- val = lp->a.read_csr(ioaddr, CSR3); ++ val = lp->a->read_csr(ioaddr, CSR3); + val |= 0x5f00; +- lp->a.write_csr(ioaddr, CSR3, val); ++ lp->a->write_csr(ioaddr, CSR3, val); + + __napi_schedule(&lp->napi); + break; + } +- csr0 = lp->a.read_csr(ioaddr, CSR0); ++ csr0 = lp->a->read_csr(ioaddr, CSR0); + } + + if (netif_msg_intr(lp)) + printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", +- dev->name, lp->a.read_csr(ioaddr, CSR0)); ++ dev->name, lp->a->read_csr(ioaddr, CSR0)); + + spin_unlock(&lp->lock); + +@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev) + + spin_lock_irqsave(&lp->lock, flags); + +- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); ++ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); + + if (netif_msg_ifdown(lp)) + printk(KERN_DEBUG + "%s: Shutting down ethercard, status was %2.2x.\n", +- dev->name, lp->a.read_csr(ioaddr, CSR0)); ++ dev->name, lp->a->read_csr(ioaddr, CSR0)); + + /* We stop the PCNET32 here -- it occasionally polls memory if we don't. 
*/ +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ +- lp->a.write_bcr(ioaddr, 20, 4); ++ lp->a->write_bcr(ioaddr, 20, 4); + + spin_unlock_irqrestore(&lp->lock, flags); + +@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); +- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); ++ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); + spin_unlock_irqrestore(&lp->lock, flags); + + return &dev->stats; +@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev) + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = cpu_to_le32(~0U); + ib->filter[1] = cpu_to_le32(~0U); +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); + return; + } + /* clear the multicast filter */ +@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev) + mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); + } + for (i = 0; i < 4; i++) +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i, + le16_to_cpu(mcast_table[i])); + return; + } +@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev) + + spin_lock_irqsave(&lp->lock, flags); + suspended = pcnet32_suspend(dev, &flags, 0); +- csr15 = lp->a.read_csr(ioaddr, CSR15); ++ csr15 = lp->a->read_csr(ioaddr, CSR15); + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. 
*/ + if (netif_msg_hw(lp)) +@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev) + lp->init_block->mode = + cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << + 7); +- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); ++ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000); + } else { + lp->init_block->mode = + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); +- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); ++ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff); + pcnet32_load_multicast(dev); + } + + if (suspended) { + int csr5; + /* clear SUSPEND (SPND) - CSR5 bit 0 */ +- csr5 = lp->a.read_csr(ioaddr, CSR5); +- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); ++ csr5 = lp->a->read_csr(ioaddr, CSR5); ++ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } else { +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + pcnet32_restart(dev, CSR0_NORMAL); + netif_wake_queue(dev); + } +@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num) + if (!lp->mii) + return 0; + +- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); +- val_out = lp->a.read_bcr(ioaddr, 34); ++ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); ++ val_out = lp->a->read_bcr(ioaddr, 34); + + return val_out; + } +@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) + if (!lp->mii) + return; + +- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); +- lp->a.write_bcr(ioaddr, 34, val); ++ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); ++ lp->a->write_bcr(ioaddr, 34, val); + } + + static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) + curr_link = mii_link_ok(&lp->mii_if); + } else { + ulong ioaddr = dev->base_addr; /* card base I/O address */ +- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); ++ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + } + if (!curr_link) { + if (prev_link || verbose) { +@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) + (ecmd.duplex == + DUPLEX_FULL) ? 
"full" : "half"); + } +- bcr9 = lp->a.read_bcr(dev->base_addr, 9); ++ bcr9 = lp->a->read_bcr(dev->base_addr, 9); + if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { + if (lp->mii_if.full_duplex) + bcr9 |= (1 << 0); + else + bcr9 &= ~(1 << 0); +- lp->a.write_bcr(dev->base_addr, 9, bcr9); ++ lp->a->write_bcr(dev->base_addr, 9, bcr9); + } + } else { + if (netif_msg_link(lp)) +diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c +index 9235901..d31e726 100644 +--- a/drivers/net/pppol2tp.c ++++ b/drivers/net/pppol2tp.c +@@ -1174,7 +1174,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) + + /* Get routing info from the tunnel socket */ + skb_dst_drop(skb); +- skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun))); ++ skb_dst_set(skb, dst_clone(__sk_dst_check(sk_tun, 0))); + pppol2tp_skb_set_owner_w(skb, sk_tun); + + /* Calculate UDP checksum if configured to do so */ +diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c +index 7cc9898..6eb50d3 100644 +--- a/drivers/net/sis190.c ++++ b/drivers/net/sis190.c +@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, + static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, + struct net_device *dev) + { +- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 }; ++ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 }; + struct sis190_private *tp = netdev_priv(dev); + struct pci_dev *isa_bridge; + u8 reg, tmp8; +diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c +index e13685a..60c948c 100644 +--- a/drivers/net/sundance.c ++++ b/drivers/net/sundance.c +@@ -225,7 +225,7 @@ enum { + struct pci_id_info { + const char *name; + }; +-static const struct pci_id_info pci_id_tbl[] __devinitdata = { ++static const struct pci_id_info pci_id_tbl[] __devinitconst = { + {"D-Link DFE-550TX FAST Ethernet Adapter"}, + {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, + {"D-Link DFE-580TX 4 port Server Adapter"}, +diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h +index 529f55a..cccaa18 100644 +--- a/drivers/net/tg3.h ++++ b/drivers/net/tg3.h +@@ -95,6 +95,7 @@ + #define CHIPREV_ID_5750_A0 0x4000 + #define CHIPREV_ID_5750_A1 0x4001 + #define CHIPREV_ID_5750_A3 0x4003 ++#define CHIPREV_ID_5750_C1 0x4201 + #define CHIPREV_ID_5750_C2 0x4202 + #define CHIPREV_ID_5752_A0_HW 0x5000 + #define CHIPREV_ID_5752_A0 0x6000 +diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c +index b9db1b5..720f9ce 100644 +--- a/drivers/net/tokenring/abyss.c ++++ b/drivers/net/tokenring/abyss.c +@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = { + + static int __init abyss_init (void) + { +- abyss_netdev_ops = tms380tr_netdev_ops; ++ pax_open_kernel(); ++ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); + +- abyss_netdev_ops.ndo_open = abyss_open; +- abyss_netdev_ops.ndo_stop = abyss_close; ++ *(void **)&abyss_netdev_ops.ndo_open = abyss_open; ++ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close; ++ pax_close_kernel(); + + return pci_register_driver(&abyss_driver); + } +diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c +index 456f8bf..373e56d 100644 +--- a/drivers/net/tokenring/madgemc.c ++++ b/drivers/net/tokenring/madgemc.c +@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = { + + static int __init madgemc_init (void) + { +- madgemc_netdev_ops = tms380tr_netdev_ops; +- madgemc_netdev_ops.ndo_open = madgemc_open; +- madgemc_netdev_ops.ndo_stop = 
madgemc_close; ++ pax_open_kernel(); ++ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); ++ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open; ++ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close; ++ pax_close_kernel(); + + return mca_register_driver (&madgemc_driver); + } +diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c +index 16e8783..925bd49 100644 +--- a/drivers/net/tokenring/proteon.c ++++ b/drivers/net/tokenring/proteon.c +@@ -353,9 +353,11 @@ static int __init proteon_init(void) + struct platform_device *pdev; + int i, num = 0, err = 0; + +- proteon_netdev_ops = tms380tr_netdev_ops; +- proteon_netdev_ops.ndo_open = proteon_open; +- proteon_netdev_ops.ndo_stop = tms380tr_close; ++ pax_open_kernel(); ++ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); ++ *(void **)&proteon_netdev_ops.ndo_open = proteon_open; ++ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close; ++ pax_close_kernel(); + + err = platform_driver_register(&proteon_driver); + if (err) +diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c +index 46db5c5..37c1536 100644 +--- a/drivers/net/tokenring/skisa.c ++++ b/drivers/net/tokenring/skisa.c +@@ -363,9 +363,11 @@ static int __init sk_isa_init(void) + struct platform_device *pdev; + int i, num = 0, err = 0; + +- sk_isa_netdev_ops = tms380tr_netdev_ops; +- sk_isa_netdev_ops.ndo_open = sk_isa_open; +- sk_isa_netdev_ops.ndo_stop = tms380tr_close; ++ pax_open_kernel(); ++ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); ++ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open; ++ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close; ++ pax_close_kernel(); + + err = platform_driver_register(&sk_isa_driver); + if (err) +diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c +index 74e5ba4..5cf6bc9 100644 +--- a/drivers/net/tulip/de2104x.c ++++ b/drivers/net/tulip/de2104x.c +@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de) + struct de_srom_info_leaf *il; + void *bufp; + ++ pax_track_stack(); ++ + /* download entire eeprom */ + for (i = 0; i < DE_EEPROM_WORDS; i++) + ((__le16 *)ee_data)[i] = +diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c +index a8349b7..90f9dfe 100644 +--- a/drivers/net/tulip/de4x5.c ++++ b/drivers/net/tulip/de4x5.c +@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + for (i=0; i<ETH_ALEN; i++) { + tmp.addr[i] = dev->dev_addr[i]; + } +- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; ++ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + break; + + case DE4X5_SET_HWADDR: /* Set the hardware address */ +@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + spin_lock_irqsave(&lp->lock, flags); + memcpy(&statbuf, &lp->pktStats, ioc->len); + spin_unlock_irqrestore(&lp->lock, flags); +- if (copy_to_user(ioc->data, &statbuf, ioc->len)) ++ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len)) + return -EFAULT; + break; + } +diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c +index 391acd3..56d11cd 100644 +--- a/drivers/net/tulip/eeprom.c ++++ b/drivers/net/tulip/eeprom.c +@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = { + {NULL}}; + + +-static const char *block_name[] __devinitdata = { ++static const char 
*block_name[] __devinitconst = { + "21140 non-MII", + "21140 MII PHY", + "21142 Serial PHY", +diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c +index b38d3b7..b1cff23 100644 +--- a/drivers/net/tulip/winbond-840.c ++++ b/drivers/net/tulip/winbond-840.c +@@ -235,7 +235,7 @@ struct pci_id_info { + int drv_flags; /* Driver use, intended as capability flags. */ + }; + +-static const struct pci_id_info pci_id_tbl[] __devinitdata = { ++static const struct pci_id_info pci_id_tbl[] __devinitconst = { + { /* Sometime a Level-One switch card. */ + "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, + { "Winbond W89c840", CanHaveMII | HasBrokenTx}, +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c +index f450bc9..2b747c8 100644 +--- a/drivers/net/usb/hso.c ++++ b/drivers/net/usb/hso.c +@@ -71,7 +71,7 @@ + #include <asm/byteorder.h> + #include <linux/serial_core.h> + #include <linux/serial.h> +- ++#include <asm/local.h> + + #define DRIVER_VERSION "1.2" + #define MOD_AUTHOR "Option Wireless" +@@ -258,7 +258,7 @@ struct hso_serial { + + /* from usb_serial_port */ + struct tty_struct *tty; +- int open_count; ++ local_t open_count; + spinlock_t serial_lock; + + int (*write_data) (struct hso_serial *serial); +@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) + struct urb *urb; + + urb = serial->rx_urb[0]; +- if (serial->open_count > 0) { ++ if (local_read(&serial->open_count) > 0) { + count = put_rxbuf_data(urb, serial); + if (count == -1) + return; +@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) + DUMP1(urb->transfer_buffer, urb->actual_length); + + /* Anyone listening? */ +- if (serial->open_count == 0) ++ if (local_read(&serial->open_count) == 0) + return; + + if (status == 0) { +@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) + spin_unlock_irq(&serial->serial_lock); + + /* check for port already opened, if not set the termios */ +- serial->open_count++; +- if (serial->open_count == 1) { ++ if (local_inc_return(&serial->open_count) == 1) { + tty->low_latency = 1; + serial->rx_state = RX_IDLE; + /* Force default termio settings */ +@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) + result = hso_start_serial_device(serial->parent, GFP_KERNEL); + if (result) { + hso_stop_serial_device(serial->parent); +- serial->open_count--; ++ local_dec(&serial->open_count); + kref_put(&serial->parent->ref, hso_serial_ref_free); + } + } else { +@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) + + /* reset the rts and dtr */ + /* do the actual close */ +- serial->open_count--; ++ local_dec(&serial->open_count); + +- if (serial->open_count <= 0) { +- serial->open_count = 0; ++ if (local_read(&serial->open_count) <= 0) { ++ local_set(&serial->open_count, 0); + spin_lock_irq(&serial->serial_lock); + if (serial->tty == tty) { + serial->tty->driver_data = NULL; +@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) + + /* the actual setup */ + spin_lock_irqsave(&serial->serial_lock, flags); +- if (serial->open_count) ++ if (local_read(&serial->open_count)) + _hso_serial_set_termios(tty, old); + else + tty->termios = old; +@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface) + /* Start all serial ports */ + for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { + if (serial_table[i] && 
(serial_table[i]->interface == iface)) { +- if (dev2ser(serial_table[i])->open_count) { ++ if (local_read(&dev2ser(serial_table[i])->open_count)) { + result = + hso_start_serial_device(serial_table[i], GFP_NOIO); + hso_kick_transmit(dev2ser(serial_table[i])); +diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h +index 3e94f0c..ffdd926 100644 +--- a/drivers/net/vxge/vxge-config.h ++++ b/drivers/net/vxge/vxge-config.h +@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs { + void (*link_down)(struct __vxge_hw_device *devh); + void (*crit_err)(struct __vxge_hw_device *devh, + enum vxge_hw_event type, u64 ext_data); +-}; ++} __no_const; + + /* + * struct __vxge_hw_blockpool_entry - Block private data structure +diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c +index 068d7a9..35293de 100644 +--- a/drivers/net/vxge/vxge-main.c ++++ b/drivers/net/vxge/vxge-main.c +@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) + struct sk_buff *completed[NR_SKB_COMPLETED]; + int more; + ++ pax_track_stack(); ++ + do { + more = 0; + skb_ptr = completed; +@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) + u8 mtable[256] = {0}; /* CPU to vpath mapping */ + int index; + ++ pax_track_stack(); ++ + /* + * Filling + * - itable with bucket numbers +diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h +index 461742b..81be42e 100644 +--- a/drivers/net/vxge/vxge-traffic.h ++++ b/drivers/net/vxge/vxge-traffic.h +@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs { + struct vxge_hw_mempool_dma *dma_object, + u32 index, + u32 is_last); +-}; ++} __no_const; + + void + __vxge_hw_mempool_destroy( +diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c +index cd8cb95..4153b79 100644 +--- a/drivers/net/wan/cycx_x25.c ++++ b/drivers/net/wan/cycx_x25.c +@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len) + unsigned char hex[1024], + * phex = hex; + ++ pax_track_stack(); ++ + if (len >= (sizeof(hex) / 2)) + len = (sizeof(hex) / 2) - 1; + +diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c +index aa9248f..a4e3c3b 100644 +--- a/drivers/net/wan/hdlc_x25.c ++++ b/drivers/net/wan/hdlc_x25.c +@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) + + static int x25_open(struct net_device *dev) + { +- struct lapb_register_struct cb; ++ static struct lapb_register_struct cb = { ++ .connect_confirmation = x25_connected, ++ .connect_indication = x25_connected, ++ .disconnect_confirmation = x25_disconnected, ++ .disconnect_indication = x25_disconnected, ++ .data_indication = x25_data_indication, ++ .data_transmit = x25_data_transmit ++ }; + int result; + +- cb.connect_confirmation = x25_connected; +- cb.connect_indication = x25_connected; +- cb.disconnect_confirmation = x25_disconnected; +- cb.disconnect_indication = x25_disconnected; +- cb.data_indication = x25_data_indication; +- cb.data_transmit = x25_data_transmit; +- + result = lapb_register(dev, &cb); + if (result != LAPB_OK) + return result; +diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c +index 5ad287c..783b020 100644 +--- a/drivers/net/wimax/i2400m/usb-fw.c ++++ b/drivers/net/wimax/i2400m/usb-fw.c +@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m, + int do_autopm = 1; + DECLARE_COMPLETION_ONSTACK(notif_completion); + ++ pax_track_stack(); ++ + d_fnstart(8, dev, "(i2400m %p ack %p size 
%zu)\n", + i2400m, ack, ack_size); + BUG_ON(_ack == i2400m->bm_ack_buf); +diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c +index 6c26840..62c97c3 100644 +--- a/drivers/net/wireless/airo.c ++++ b/drivers/net/wireless/airo.c +@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) { + BSSListElement * loop_net; + BSSListElement * tmp_net; + ++ pax_track_stack(); ++ + /* Blow away current list of scan results */ + list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) { + list_move_tail (&loop_net->list, &ai->network_free_list); +@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) + WepKeyRid wkr; + int rc; + ++ pax_track_stack(); ++ + memset( &mySsid, 0, sizeof( mySsid ) ); + kfree (ai->flash); + ai->flash = NULL; +@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode, + __le32 *vals = stats.vals; + int len; + ++ pax_track_stack(); ++ + if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) + return -ENOMEM; + data = (struct proc_data *)file->private_data; +@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { + /* If doLoseSync is not 1, we won't do a Lose Sync */ + int doLoseSync = -1; + ++ pax_track_stack(); ++ + if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) + return -ENOMEM; + data = (struct proc_data *)file->private_data; +@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev, + int i; + int loseSync = capable(CAP_NET_ADMIN) ? 1: -1; + ++ pax_track_stack(); ++ + qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL); + if (!qual) + return -ENOMEM; +@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local) + CapabilityRid cap_rid; + __le32 *vals = stats_rid.vals; + ++ pax_track_stack(); ++ + /* Get stats out of the card */ + clear_bit(JOB_WSTATS, &local->jobs); + if (local->power.event) { +diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c +index 747508c..82e965d 100644 +--- a/drivers/net/wireless/ath/ath5k/debug.c ++++ b/drivers/net/wireless/ath/ath5k/debug.c +@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf, + unsigned int v; + u64 tsf; + ++ pax_track_stack(); ++ + v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON); + len += snprintf(buf+len, sizeof(buf)-len, + "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n", +@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, + unsigned int len = 0; + unsigned int i; + ++ pax_track_stack(); ++ + len += snprintf(buf+len, sizeof(buf)-len, + "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level); + +diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c +index 2be4c22..593b1eb 100644 +--- a/drivers/net/wireless/ath/ath9k/debug.c ++++ b/drivers/net/wireless/ath/ath9k/debug.c +@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, + char buf[512]; + unsigned int len = 0; + ++ pax_track_stack(); ++ + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok); + len += snprintf(buf + len, sizeof(buf) - len, +@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, + int i; + u8 addr[ETH_ALEN]; + ++ pax_track_stack(); ++ + len += snprintf(buf + len, sizeof(buf) - len, + "primary: %s (%s chan=%d ht=%d)\n", + wiphy_name(sc->pri_wiphy->hw->wiphy), 
+diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c +index 80b19a4..dab3a45 100644 +--- a/drivers/net/wireless/b43/debugfs.c ++++ b/drivers/net/wireless/b43/debugfs.c +@@ -43,7 +43,7 @@ static struct dentry *rootdir; + struct b43_debugfs_fops { + ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43_wldev *dev, const char *buf, size_t count); +- struct file_operations fops; ++ const struct file_operations fops; + /* Offset of struct b43_dfs_file in struct b43_dfsentry */ + size_t file_struct_offset; + }; +diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c +index 1f85ac5..c99b4b4 100644 +--- a/drivers/net/wireless/b43legacy/debugfs.c ++++ b/drivers/net/wireless/b43legacy/debugfs.c +@@ -44,7 +44,7 @@ static struct dentry *rootdir; + struct b43legacy_debugfs_fops { + ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); +- struct file_operations fops; ++ const struct file_operations fops; + /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ + size_t file_struct_offset; + /* Take wl->irq_lock before calling read/write? */ +diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c +index 43102bf..3b569c3 100644 +--- a/drivers/net/wireless/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/ipw2x00/ipw2100.c +@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, + int err; + DECLARE_SSID_BUF(ssid); + ++ pax_track_stack(); ++ + IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len)); + + if (ssid_len) +@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv, + struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters; + int err; + ++ pax_track_stack(); ++ + IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n", + idx, keylen, len); + +diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c +index 282b1f7..169f0cf 100644 +--- a/drivers/net/wireless/ipw2x00/libipw_rx.c ++++ b/drivers/net/wireless/ipw2x00/libipw_rx.c +@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device + unsigned long flags; + DECLARE_SSID_BUF(ssid); + ++ pax_track_stack(); ++ + LIBIPW_DEBUG_SCAN("'%s' (%pM" + "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", + print_ssid(ssid, info_element->data, info_element->len), +diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c +index 950267a..80d5fd2 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-1000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-1000.c +@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = { + }, + }; + +-static struct iwl_ops iwl1000_ops = { ++static const struct iwl_ops iwl1000_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl1000_lib, + .hcmd = &iwl5000_hcmd, +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c +index 56bfcc3..b348020 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c +@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { + .build_addsta_hcmd = iwl3945_build_addsta_hcmd, + }; + +-static struct iwl_ops iwl3945_ops = { ++static const struct iwl_ops iwl3945_ops = { + .ucode = &iwl3945_ucode, + .lib = &iwl3945_lib, + .hcmd = &iwl3945_hcmd, +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c 
b/drivers/net/wireless/iwlwifi/iwl-4965.c +index 585b8d4..e142963 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c +@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = { + }, + }; + +-static struct iwl_ops iwl4965_ops = { ++static const struct iwl_ops iwl4965_ops = { + .ucode = &iwl4965_ucode, + .lib = &iwl4965_lib, + .hcmd = &iwl4965_hcmd, +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c +index 1f423f2..e37c192 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-5000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c +@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = { + }, + }; + +-struct iwl_ops iwl5000_ops = { ++const struct iwl_ops iwl5000_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl5000_lib, + .hcmd = &iwl5000_hcmd, + .utils = &iwl5000_hcmd_utils, + }; + +-static struct iwl_ops iwl5150_ops = { ++static const struct iwl_ops iwl5150_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl5150_lib, + .hcmd = &iwl5000_hcmd, +diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c +index 1473452..f07d5e1 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-6000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c +@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = { + .calc_rssi = iwl5000_calc_rssi, + }; + +-static struct iwl_ops iwl6000_ops = { ++static const struct iwl_ops iwl6000_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl6000_lib, + .hcmd = &iwl5000_hcmd, +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +index 1a3dfa2..b3e0a61 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, + u8 active_index = 0; + s32 tpt = 0; + ++ pax_track_stack(); ++ + IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); + + if (!ieee80211_is_data(hdr->frame_control) || +@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, + u8 valid_tx_ant = 0; + struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; + ++ pax_track_stack(); ++ + /* Override starting rate (index 0) if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c +index 0e56d78..6a3c107 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c +@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (iwl_debug_level & IWL_DL_INFO) + dev_printk(KERN_DEBUG, &(pdev->dev), + "Disabling hw_scan\n"); +- iwl_hw_ops.hw_scan = NULL; ++ pax_open_kernel(); ++ *(void **)&iwl_hw_ops.hw_scan = NULL; ++ pax_close_kernel(); + } + + hw = iwl_alloc_all(cfg, &iwl_hw_ops); +diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h +index cbc6290..eb323d7 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-debug.h ++++ b/drivers/net/wireless/iwlwifi/iwl-debug.h +@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv); + #endif + + #else +-#define IWL_DEBUG(__priv, level, fmt, args...) +-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) ++#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0) ++#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 
do {} while (0) + static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, + void *p, u32 len) + {} +diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c +index a198bcf..8e68233 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c ++++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c +@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file, + int pos = 0; + const size_t bufsz = sizeof(buf); + ++ pax_track_stack(); ++ + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", + test_bit(STATUS_HCMD_ACTIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n", +@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, + const size_t bufsz = sizeof(buf); + ssize_t ret; + ++ pax_track_stack(); ++ + for (i = 0; i < AC_NUM; i++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); +diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h +index 3539ea4..b174bfa 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-dev.h ++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h +@@ -68,7 +68,7 @@ struct iwl_tx_queue; + + /* shared structures from iwl-5000.c */ + extern struct iwl_mod_params iwl50_mod_params; +-extern struct iwl_ops iwl5000_ops; ++extern const struct iwl_ops iwl5000_ops; + extern struct iwl_ucode_ops iwl5000_ucode; + extern struct iwl_lib_ops iwl5000_lib; + extern struct iwl_hcmd_ops iwl5000_hcmd; +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c +index 619590d..69235ee 100644 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c +@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e + */ + if (iwl3945_mod_params.disable_hw_scan) { + IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); +- iwl3945_hw_ops.hw_scan = NULL; ++ pax_open_kernel(); ++ *(void **)&iwl3945_hw_ops.hw_scan = NULL; ++ pax_close_kernel(); + } + + +diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c +index 1465379..fe4d78b 100644 +--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c ++++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c +@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp, + int buf_len = 512; + size_t len = 0; + ++ pax_track_stack(); ++ + if (*ppos != 0) + return 0; + if (count < sizeof(buf)) +diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c +index 893a55c..7f66a50 100644 +--- a/drivers/net/wireless/libertas/debugfs.c ++++ b/drivers/net/wireless/libertas/debugfs.c +@@ -708,7 +708,7 @@ out_unlock: + struct lbs_debugfs_files { + const char *name; + int perm; +- struct file_operations fops; ++ const struct file_operations fops; + }; + + static const struct lbs_debugfs_files debugfs_files[] = { +diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c +index 2ecbedb..42704f0 100644 +--- a/drivers/net/wireless/rndis_wlan.c ++++ b/drivers/net/wireless/rndis_wlan.c +@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) + + devdbg(usbdev, "set_rts_threshold %i", rts_threshold); + +- if (rts_threshold < 0 || rts_threshold > 2347) ++ if (rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); +diff --git a/drivers/oprofile/buffer_sync.c 
b/drivers/oprofile/buffer_sync.c +index 334ccd6..47f8944 100644 +--- a/drivers/oprofile/buffer_sync.c ++++ b/drivers/oprofile/buffer_sync.c +@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm) + if (cookie == NO_COOKIE) + offset = pc; + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + offset = pc; + } + if (cookie != last_cookie) { +@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) + /* add userspace sample */ + + if (!mm) { +- atomic_inc(&oprofile_stats.sample_lost_no_mm); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); + return 0; + } + + cookie = lookup_dcookie(mm, s->eip, &offset); + + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + return 0; + } + +@@ -562,7 +562,7 @@ void sync_buffer(int cpu) + /* ignore backtraces if failed to add a sample */ + if (state == sb_bt_start) { + state = sb_bt_ignore; +- atomic_inc(&oprofile_stats.bt_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); + } + } + release_mm(mm); +diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c +index 5df60a6..72f5c1c 100644 +--- a/drivers/oprofile/event_buffer.c ++++ b/drivers/oprofile/event_buffer.c +@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value) + } + + if (buffer_pos == buffer_size) { +- atomic_inc(&oprofile_stats.event_lost_overflow); ++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); + return; + } + +diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c +index dc8a042..fe5f315 100644 +--- a/drivers/oprofile/oprof.c ++++ b/drivers/oprofile/oprof.c +@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work) + if (oprofile_ops.switch_events()) + return; + +- atomic_inc(&oprofile_stats.multiplex_counter); ++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter); + start_switch_worker(); + } + +diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c +index 61689e8..387f7f8 100644 +--- a/drivers/oprofile/oprofile_stats.c ++++ b/drivers/oprofile/oprofile_stats.c +@@ -30,11 +30,11 @@ void oprofile_reset_stats(void) + cpu_buf->sample_invalid_eip = 0; + } + +- atomic_set(&oprofile_stats.sample_lost_no_mm, 0); +- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.event_lost_overflow, 0); +- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.multiplex_counter, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); ++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); + } + + +diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h +index 0b54e46..a37c527 100644 +--- a/drivers/oprofile/oprofile_stats.h ++++ b/drivers/oprofile/oprofile_stats.h +@@ -13,11 +13,11 @@ + #include <asm/atomic.h> + + struct oprofile_stat_struct { +- atomic_t sample_lost_no_mm; +- atomic_t sample_lost_no_mapping; +- atomic_t bt_lost_no_mapping; +- atomic_t event_lost_overflow; +- atomic_t multiplex_counter; ++ atomic_unchecked_t sample_lost_no_mm; ++ atomic_unchecked_t sample_lost_no_mapping; ++ atomic_unchecked_t 
bt_lost_no_mapping; ++ atomic_unchecked_t event_lost_overflow; ++ atomic_unchecked_t multiplex_counter; + }; + + extern struct oprofile_stat_struct oprofile_stats; +diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c +index 2766a6d..80c77e2 100644 +--- a/drivers/oprofile/oprofilefs.c ++++ b/drivers/oprofile/oprofilefs.c +@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = { + + + int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, +- char const *name, atomic_t *val) ++ char const *name, atomic_unchecked_t *val) + { + struct dentry *d = __oprofilefs_create_file(sb, root, name, + &atomic_ro_fops, 0444); +diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c +index 13a64bc..ad62835 100644 +--- a/drivers/parisc/pdc_stable.c ++++ b/drivers/parisc/pdc_stable.c +@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr, + return ret; + } + +-static struct sysfs_ops pdcspath_attr_ops = { ++static const struct sysfs_ops pdcspath_attr_ops = { + .show = pdcspath_attr_show, + .store = pdcspath_attr_store, + }; +diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c +index 8eefe56..40751a7 100644 +--- a/drivers/parport/procfs.c ++++ b/drivers/parport/procfs.c +@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write, + + *ppos += len; + +- return copy_to_user(result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0; + } + + #ifdef CONFIG_PARPORT_1284 +@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write, + + *ppos += len; + +- return copy_to_user (result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0; + } + #endif /* IEEE1284.3 support. 
*/ + +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c +index 73e7d8e..c80f3d2 100644 +--- a/drivers/pci/hotplug/acpiphp_glue.c ++++ b/drivers/pci/hotplug/acpiphp_glue.c +@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, + } + + +-static struct acpi_dock_ops acpiphp_dock_ops = { ++static const struct acpi_dock_ops acpiphp_dock_ops = { + .handler = handle_hotplug_event_func, + }; + +diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h +index 9fff878..ad0ad53 100644 +--- a/drivers/pci/hotplug/cpci_hotplug.h ++++ b/drivers/pci/hotplug/cpci_hotplug.h +@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops { + int (*hardware_test) (struct slot* slot, u32 value); + u8 (*get_power) (struct slot* slot); + int (*set_power) (struct slot* slot, int value); +-}; ++} __no_const; + + struct cpci_hp_controller { + unsigned int irq; +diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c +index 76ba8a1..20ca857 100644 +--- a/drivers/pci/hotplug/cpqphp_nvram.c ++++ b/drivers/pci/hotplug/cpqphp_nvram.c +@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start) + + void compaq_nvram_init (void __iomem *rom_start) + { ++ ++#ifndef CONFIG_PAX_KERNEXEC + if (rom_start) { + compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); + } ++#endif ++ + dbg("int15 entry = %p\n", compaq_int15_entry_point); + + /* initialize our int15 lock */ +diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c +index 6151389..0a894ef 100644 +--- a/drivers/pci/hotplug/fakephp.c ++++ b/drivers/pci/hotplug/fakephp.c +@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj) + } + + static struct kobj_type legacy_ktype = { +- .sysfs_ops = &(struct sysfs_ops){ ++ .sysfs_ops = &(const struct sysfs_ops){ + .store = legacy_store, .show = legacy_show + }, + .release = &legacy_release, +diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c +index 5b680df..fe05b7e 100644 +--- a/drivers/pci/intel-iommu.c ++++ b/drivers/pci/intel-iommu.c +@@ -2643,7 +2643,7 @@ error: + return 0; + } + +-static dma_addr_t intel_map_page(struct device *dev, struct page *page, ++dma_addr_t intel_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) + spin_unlock_irqrestore(&async_umap_flush_lock, flags); + } + +-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, ++void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) + { +@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, + } + } + +-static void *intel_alloc_coherent(struct device *hwdev, size_t size, ++void *intel_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) + { + void *vaddr; +@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, + return NULL; + } + +-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, ++void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) + { + int order; +@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + free_pages((unsigned long)vaddr, order); + } + +-static void 
intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, ++void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir, + struct dma_attrs *attrs) + { +@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, + return nelems; + } + +-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, ++int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, + enum dma_data_direction dir, struct dma_attrs *attrs) + { + int i; +@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne + return nelems; + } + +-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) ++int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) + { + return !dma_addr; + } + +-struct dma_map_ops intel_dma_ops = { ++const struct dma_map_ops intel_dma_ops = { + .alloc_coherent = intel_alloc_coherent, + .free_coherent = intel_free_coherent, + .map_sg = intel_map_sg, +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 5b7056c..607bc94 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -27,9 +27,9 @@ + #define MODULE_PARAM_PREFIX "pcie_aspm." + + /* Note: those are not register definitions */ +-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ +-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ +-#define ASPM_STATE_L1 (4) /* L1 state */ ++#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */ ++#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */ ++#define ASPM_STATE_L1 (4U) /* L1 state */ + #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) + #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) + +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 8105e32..ca10419 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, + return ret; + } + +-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev, ++static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev, + struct device_attribute *attr, + char *buf) + { + return pci_bus_show_cpuaffinity(dev, 0, attr, buf); + } + +-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev, ++static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev, + struct device_attribute *attr, + char *buf) + { +diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c +index a03ad8c..024b0da 100644 +--- a/drivers/pci/proc.c ++++ b/drivers/pci/proc.c +@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = { + static int __init pci_proc_init(void) + { + struct pci_dev *dev = NULL; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); ++#endif + proc_create("devices", 0, proc_bus_pci_dir, + &proc_bus_pci_dev_operations); + proc_initialized = 1; +diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c +index 8c02b6c..5584d8e 100644 +--- a/drivers/pci/slot.c ++++ b/drivers/pci/slot.c +@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj, + return attribute->store ? 
attribute->store(slot, buf, len) : -EIO; + } + +-static struct sysfs_ops pci_slot_sysfs_ops = { ++static const struct sysfs_ops pci_slot_sysfs_ops = { + .show = pci_slot_attr_show, + .store = pci_slot_attr_store, + }; +diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c +index 30cf71d2..50938f1 100644 +--- a/drivers/pcmcia/pcmcia_ioctl.c ++++ b/drivers/pcmcia/pcmcia_ioctl.c +@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file, + return -EFAULT; + } + } +- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL); ++ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL); + if (!buf) + return -ENOMEM; + +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index 52183c4..b224c69 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd) + return 0; + } + +-static struct backlight_ops acer_bl_ops = { ++static const struct backlight_ops acer_bl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, + }; +diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c +index 767cb61..a87380b 100644 +--- a/drivers/platform/x86/asus-laptop.c ++++ b/drivers/platform/x86/asus-laptop.c +@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device; + */ + static int read_brightness(struct backlight_device *bd); + static int update_bl_status(struct backlight_device *bd); +-static struct backlight_ops asusbl_ops = { ++static const struct backlight_ops asusbl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, + }; +diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c +index d66c07a..a4abaac 100644 +--- a/drivers/platform/x86/asus_acpi.c ++++ b/drivers/platform/x86/asus_acpi.c +@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type) + return 0; + } + +-static struct backlight_ops asus_backlight_data = { ++static const struct backlight_ops asus_backlight_data = { + .get_brightness = read_brightness, + .update_status = set_brightness_status, + }; +diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c +index 11003bb..550ff1b 100644 +--- a/drivers/platform/x86/compal-laptop.c ++++ b/drivers/platform/x86/compal-laptop.c +@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b) + return set_lcd_level(b->props.brightness); + } + +-static struct backlight_ops compalbl_ops = { ++static const struct backlight_ops compalbl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, + }; +diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c +index 07a74da..9dc99fa 100644 +--- a/drivers/platform/x86/dell-laptop.c ++++ b/drivers/platform/x86/dell-laptop.c +@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd) + return buffer.output[1]; + } + +-static struct backlight_ops dell_ops = { ++static const struct backlight_ops dell_ops = { + .get_brightness = dell_get_intensity, + .update_status = dell_send_intensity, + }; +diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c +index c533b1c..5c81f22 100644 +--- a/drivers/platform/x86/eeepc-laptop.c ++++ b/drivers/platform/x86/eeepc-laptop.c +@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device; + */ + static int read_brightness(struct backlight_device *bd); + static int 
update_bl_status(struct backlight_device *bd); +-static struct backlight_ops eeepcbl_ops = { ++static const struct backlight_ops eeepcbl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, + }; +diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c +index bcd4ba8..a249b35 100644 +--- a/drivers/platform/x86/fujitsu-laptop.c ++++ b/drivers/platform/x86/fujitsu-laptop.c +@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b) + return ret; + } + +-static struct backlight_ops fujitsubl_ops = { ++static const struct backlight_ops fujitsubl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, + }; +diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c +index 759763d..1093ba2 100644 +--- a/drivers/platform/x86/msi-laptop.c ++++ b/drivers/platform/x86/msi-laptop.c +@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b) + return set_lcd_level(b->props.brightness); + } + +-static struct backlight_ops msibl_ops = { ++static const struct backlight_ops msibl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, + }; +diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c +index fe7cf01..9012d8d 100644 +--- a/drivers/platform/x86/panasonic-laptop.c ++++ b/drivers/platform/x86/panasonic-laptop.c +@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd) + return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright); + } + +-static struct backlight_ops pcc_backlight_ops = { ++static const struct backlight_ops pcc_backlight_ops = { + .get_brightness = bl_get, + .update_status = bl_set_status, + }; +diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c +index a2a742c..b37e25e 100644 +--- a/drivers/platform/x86/sony-laptop.c ++++ b/drivers/platform/x86/sony-laptop.c +@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd) + } + + static struct backlight_device *sony_backlight_device; +-static struct backlight_ops sony_backlight_ops = { ++static const struct backlight_ops sony_backlight_ops = { + .update_status = sony_backlight_update_status, + .get_brightness = sony_backlight_get_brightness, + }; +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index 68271ae..5e8fb10 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void) + return 0; + } + +-void static hotkey_mask_warn_incomplete_mask(void) ++static void hotkey_mask_warn_incomplete_mask(void) + { + /* log only what the user can fix... 
*/ + const u32 wantedmask = hotkey_driver_mask & +@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void) + BACKLIGHT_UPDATE_HOTKEY); + } + +-static struct backlight_ops ibm_backlight_data = { ++static const struct backlight_ops ibm_backlight_data = { + .get_brightness = brightness_get, + .update_status = brightness_update_status, + }; +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c +index 51c0a8b..0786629 100644 +--- a/drivers/platform/x86/toshiba_acpi.c ++++ b/drivers/platform/x86/toshiba_acpi.c +@@ -671,7 +671,7 @@ static acpi_status remove_device(void) + return AE_OK; + } + +-static struct backlight_ops toshiba_backlight_data = { ++static const struct backlight_ops toshiba_backlight_data = { + .get_brightness = get_lcd, + .update_status = set_lcd_status, + }; +diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c +index fc83783c..cf370d7 100644 +--- a/drivers/pnp/pnpbios/bioscalls.c ++++ b/drivers/pnp/pnpbios/bioscalls.c +@@ -60,7 +60,7 @@ do { \ + set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ + } while(0) + +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + /* +@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + + cpu = get_cpu(); + save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; ++ ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + /* On some boxes IRQ's during PnP BIOS calls are deadly. */ + spin_lock_irqsave(&pnp_bios_lock, flags); +@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + :"memory"); + spin_unlock_irqrestore(&pnp_bios_lock, flags); + ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + /* If we get here and this is set then the PnP BIOS faulted on us. 
*/ +@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base) + return status; + } + +-void pnpbios_calls_init(union pnp_bios_install_struct *header) ++void __init pnpbios_calls_init(union pnp_bios_install_struct *header) + { + int i; + +@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) + pnp_bios_callpoint.offset = header->fields.pm16offset; + pnp_bios_callpoint.segment = PNP_CS16; + ++ pax_open_kernel(); ++ + for_each_possible_cpu(i) { + struct desc_struct *gdt = get_cpu_gdt_table(i); + if (!gdt) +@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) + set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], + (unsigned long)__va(header->fields.pm16dseg)); + } ++ ++ pax_close_kernel(); + } +diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c +index ba97654..66b99d4 100644 +--- a/drivers/pnp/resource.c ++++ b/drivers/pnp/resource.c +@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res) + return 1; + + /* check if the resource is valid */ +- if (*irq < 0 || *irq > 15) ++ if (*irq > 15) + return 0; + + /* check if the resource is reserved */ +@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res) + return 1; + + /* check if the resource is valid */ +- if (*dma < 0 || *dma == 4 || *dma > 7) ++ if (*dma == 4 || *dma > 7) + return 0; + + /* check if the resource is reserved */ +diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c +index 62bb981..24a2dc9 100644 +--- a/drivers/power/bq27x00_battery.c ++++ b/drivers/power/bq27x00_battery.c +@@ -44,7 +44,7 @@ struct bq27x00_device_info; + struct bq27x00_access_methods { + int (*read)(u8 reg, int *rt_value, int b_single, + struct bq27x00_device_info *di); +-}; ++} __no_const; + + struct bq27x00_device_info { + struct device *dev; +diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c +index 62227cd..b5b538b 100644 +--- a/drivers/rtc/rtc-dev.c ++++ b/drivers/rtc/rtc-dev.c +@@ -14,6 +14,7 @@ + #include <linux/module.h> + #include <linux/rtc.h> + #include <linux/sched.h> ++#include <linux/grsecurity.h> + #include "rtc-core.h" + + static dev_t rtc_devt; +@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file, + if (copy_from_user(&tm, uarg, sizeof(tm))) + return -EFAULT; + ++ gr_log_timechange(); ++ + return rtc_set_time(rtc, &tm); + + case RTC_PIE_ON: +diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c +index 968e3c7..fbc637a 100644 +--- a/drivers/s390/cio/qdio_perf.c ++++ b/drivers/s390/cio/qdio_perf.c +@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde; + static int qdio_perf_proc_show(struct seq_file *m, void *v) + { + seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.qdio_int)); ++ (long)atomic_long_read_unchecked(&perf_stats.qdio_int)); + seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.pci_int)); ++ (long)atomic_long_read_unchecked(&perf_stats.pci_int)); + seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.thin_int)); ++ (long)atomic_long_read_unchecked(&perf_stats.thin_int)); + seq_printf(m, "\n"); + seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.tasklet_inbound)); ++ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound)); + seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.tasklet_outbound)); ++ 
(long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound)); + seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n", +- (long)atomic_long_read(&perf_stats.tasklet_thinint), +- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop)); ++ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint), ++ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop)); + seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n", +- (long)atomic_long_read(&perf_stats.thinint_inbound), +- (long)atomic_long_read(&perf_stats.thinint_inbound_loop)); ++ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound), ++ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop)); + seq_printf(m, "\n"); + seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.siga_in)); ++ (long)atomic_long_read_unchecked(&perf_stats.siga_in)); + seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.siga_out)); ++ (long)atomic_long_read_unchecked(&perf_stats.siga_out)); + seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.siga_sync)); ++ (long)atomic_long_read_unchecked(&perf_stats.siga_sync)); + seq_printf(m, "\n"); + seq_printf(m, "Number of inbound transfers\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.inbound_handler)); ++ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler)); + seq_printf(m, "Number of outbound transfers\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.outbound_handler)); ++ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler)); + seq_printf(m, "\n"); + seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", +- (long)atomic_long_read(&perf_stats.fast_requeue)); ++ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue)); + seq_printf(m, "Number of outbound target full condition\t: %li\n", +- (long)atomic_long_read(&perf_stats.outbound_target_full)); ++ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full)); + seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", +- (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); ++ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer)); + seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", +- (long)atomic_long_read(&perf_stats.debug_stop_polling)); ++ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling)); + seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", +- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); ++ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2)); + seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n", +- (long)atomic_long_read(&perf_stats.debug_eqbs_all), +- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete)); ++ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all), ++ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete)); + seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n", +- (long)atomic_long_read(&perf_stats.debug_sqbs_all), +- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete)); ++ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all), ++ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete)); + seq_printf(m, "\n"); + return 0; + } +diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h +index ff4504c..b3604c3 100644 +--- a/drivers/s390/cio/qdio_perf.h ++++ b/drivers/s390/cio/qdio_perf.h +@@ -13,46 +13,46 @@ + + struct 
qdio_perf_stats { + /* interrupt handler calls */ +- atomic_long_t qdio_int; +- atomic_long_t pci_int; +- atomic_long_t thin_int; ++ atomic_long_unchecked_t qdio_int; ++ atomic_long_unchecked_t pci_int; ++ atomic_long_unchecked_t thin_int; + + /* tasklet runs */ +- atomic_long_t tasklet_inbound; +- atomic_long_t tasklet_outbound; +- atomic_long_t tasklet_thinint; +- atomic_long_t tasklet_thinint_loop; +- atomic_long_t thinint_inbound; +- atomic_long_t thinint_inbound_loop; +- atomic_long_t thinint_inbound_loop2; ++ atomic_long_unchecked_t tasklet_inbound; ++ atomic_long_unchecked_t tasklet_outbound; ++ atomic_long_unchecked_t tasklet_thinint; ++ atomic_long_unchecked_t tasklet_thinint_loop; ++ atomic_long_unchecked_t thinint_inbound; ++ atomic_long_unchecked_t thinint_inbound_loop; ++ atomic_long_unchecked_t thinint_inbound_loop2; + + /* signal adapter calls */ +- atomic_long_t siga_out; +- atomic_long_t siga_in; +- atomic_long_t siga_sync; ++ atomic_long_unchecked_t siga_out; ++ atomic_long_unchecked_t siga_in; ++ atomic_long_unchecked_t siga_sync; + + /* misc */ +- atomic_long_t inbound_handler; +- atomic_long_t outbound_handler; +- atomic_long_t fast_requeue; +- atomic_long_t outbound_target_full; ++ atomic_long_unchecked_t inbound_handler; ++ atomic_long_unchecked_t outbound_handler; ++ atomic_long_unchecked_t fast_requeue; ++ atomic_long_unchecked_t outbound_target_full; + + /* for debugging */ +- atomic_long_t debug_tl_out_timer; +- atomic_long_t debug_stop_polling; +- atomic_long_t debug_eqbs_all; +- atomic_long_t debug_eqbs_incomplete; +- atomic_long_t debug_sqbs_all; +- atomic_long_t debug_sqbs_incomplete; ++ atomic_long_unchecked_t debug_tl_out_timer; ++ atomic_long_unchecked_t debug_stop_polling; ++ atomic_long_unchecked_t debug_eqbs_all; ++ atomic_long_unchecked_t debug_eqbs_incomplete; ++ atomic_long_unchecked_t debug_sqbs_all; ++ atomic_long_unchecked_t debug_sqbs_incomplete; + }; + + extern struct qdio_perf_stats perf_stats; + extern int qdio_performance_stats; + +-static inline void qdio_perf_stat_inc(atomic_long_t *count) ++static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count) + { + if (qdio_performance_stats) +- atomic_long_inc(count); ++ atomic_long_inc_unchecked(count); + } + + int qdio_setup_perf_stats(void); +diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c +new file mode 100644 +index 0000000..7d18a18 +--- /dev/null ++++ b/drivers/scsi/3w-sas.c +@@ -0,0 +1,1933 @@ ++/* ++ 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. ++ ++ Written By: Adam Radford <linuxraid@lsi.com> ++ ++ Copyright (C) 2009 LSI Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of the GNU General Public License as published by ++ the Free Software Foundation; version 2 of the License. ++ ++ This program is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. ++ ++ NO WARRANTY ++ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR ++ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT ++ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, ++ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is ++ solely responsible for determining the appropriateness of using and ++ distributing the Program and assumes all risks associated with its ++ exercise of rights under this Agreement, including but not limited to ++ the risks and costs of program errors, damage to or loss of data, ++ programs or equipment, and unavailability or interruption of operations. ++ ++ DISCLAIMER OF LIABILITY ++ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY ++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ++ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR ++ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE ++ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED ++ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES ++ ++ You should have received a copy of the GNU General Public License ++ along with this program; if not, write to the Free Software ++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ ++ Controllers supported by this driver: ++ ++ LSI 3ware 9750 6Gb/s SAS/SATA-RAID ++ ++ Bugs/Comments/Suggestions should be mailed to: ++ linuxraid@lsi.com ++ ++ For more information, goto: ++ http://www.lsi.com ++ ++ History ++ ------- ++ 3.26.00.000 - Initial driver release. ++*/ ++ ++#include <linux/module.h> ++#include <linux/reboot.h> ++#include <linux/spinlock.h> ++#include <linux/interrupt.h> ++#include <linux/moduleparam.h> ++#include <linux/errno.h> ++#include <linux/types.h> ++#include <linux/delay.h> ++#include <linux/pci.h> ++#include <linux/time.h> ++#include <linux/mutex.h> ++#include <linux/smp_lock.h> ++#include <asm/io.h> ++#include <asm/irq.h> ++#include <asm/uaccess.h> ++#include <scsi/scsi.h> ++#include <scsi/scsi_host.h> ++#include <scsi/scsi_tcq.h> ++#include <scsi/scsi_cmnd.h> ++#include "3w-sas.h" ++ ++/* Globals */ ++#define TW_DRIVER_VERSION "3.26.00.028-2.6.32RH" ++static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT]; ++static unsigned int twl_device_extension_count; ++static int twl_major = -1; ++extern struct timezone sys_tz; ++ ++/* Module parameters */ ++MODULE_AUTHOR ("LSI"); ++MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(TW_DRIVER_VERSION); ++ ++static int use_msi = 0; ++module_param(use_msi, int, S_IRUGO); ++MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. 
Default: 0"); ++ ++/* Function prototypes */ ++static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset); ++ ++/* Functions */ ++ ++/* This function returns AENs through sysfs */ ++static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *bin_attr, ++ char *outbuf, loff_t offset, size_t count) ++{ ++ struct device *dev = container_of(kobj, struct device, kobj); ++ struct Scsi_Host *shost = class_to_shost(dev); ++ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; ++ unsigned long flags = 0; ++ ssize_t ret; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EACCES; ++ ++ spin_lock_irqsave(tw_dev->host->host_lock, flags); ++ ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH); ++ spin_unlock_irqrestore(tw_dev->host->host_lock, flags); ++ ++ return ret; ++} /* End twl_sysfs_aen_read() */ ++ ++/* aen_read sysfs attribute initializer */ ++static struct bin_attribute twl_sysfs_aen_read_attr = { ++ .attr = { ++ .name = "3ware_aen_read", ++ .mode = S_IRUSR, ++ }, ++ .size = 0, ++ .read = twl_sysfs_aen_read ++}; ++ ++/* This function returns driver compatibility info through sysfs */ ++static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *bin_attr, ++ char *outbuf, loff_t offset, size_t count) ++{ ++ struct device *dev = container_of(kobj, struct device, kobj); ++ struct Scsi_Host *shost = class_to_shost(dev); ++ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata; ++ unsigned long flags = 0; ++ ssize_t ret; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EACCES; ++ ++ spin_lock_irqsave(tw_dev->host->host_lock, flags); ++ ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); ++ spin_unlock_irqrestore(tw_dev->host->host_lock, flags); ++ ++ return ret; ++} /* End twl_sysfs_compat_info() */ ++ ++/* compat_info sysfs attribute initializer */ ++static struct bin_attribute twl_sysfs_compat_info_attr = { ++ .attr = { ++ .name = "3ware_compat_info", ++ .mode = S_IRUSR, ++ }, ++ .size = 0, ++ .read = twl_sysfs_compat_info ++}; ++ ++/* Show some statistics about the card */ ++static ssize_t twl_show_stats(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct Scsi_Host *host = class_to_shost(dev); ++ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; ++ unsigned long flags = 0; ++ ssize_t len; ++ ++ spin_lock_irqsave(tw_dev->host->host_lock, flags); ++ len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n" ++ "Current commands posted: %4d\n" ++ "Max commands posted: %4d\n" ++ "Last sgl length: %4d\n" ++ "Max sgl length: %4d\n" ++ "Last sector count: %4d\n" ++ "Max sector count: %4d\n" ++ "SCSI Host Resets: %4d\n" ++ "AEN's: %4d\n", ++ TW_DRIVER_VERSION, ++ tw_dev->posted_request_count, ++ tw_dev->max_posted_request_count, ++ tw_dev->sgl_entries, ++ tw_dev->max_sgl_entries, ++ tw_dev->sector_count, ++ tw_dev->max_sector_count, ++ tw_dev->num_resets, ++ tw_dev->aen_count); ++ spin_unlock_irqrestore(tw_dev->host->host_lock, flags); ++ return len; ++} /* End twl_show_stats() */ ++ ++/* This function will set a devices queue depth */ ++static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth, ++ int reason) ++{ ++ if (reason != SCSI_QDEPTH_DEFAULT) ++ return -EOPNOTSUPP; ++ ++ if (queue_depth > TW_Q_LENGTH-2) ++ queue_depth = TW_Q_LENGTH-2; ++ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 
queue_depth); ++ return queue_depth; ++} /* End twl_change_queue_depth() */ ++ ++/* stats sysfs attribute initializer */ ++static struct device_attribute twl_host_stats_attr = { ++ .attr = { ++ .name = "3ware_stats", ++ .mode = S_IRUGO, ++ }, ++ .show = twl_show_stats ++}; ++ ++/* Host attributes initializer */ ++static struct device_attribute *twl_host_attrs[] = { ++ &twl_host_stats_attr, ++ NULL, ++}; ++ ++/* This function will look up an AEN severity string */ ++static char *twl_aen_severity_lookup(unsigned char severity_code) ++{ ++ char *retval = NULL; ++ ++ if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || ++ (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) ++ goto out; ++ ++ retval = twl_aen_severity_table[severity_code]; ++out: ++ return retval; ++} /* End twl_aen_severity_lookup() */ ++ ++/* This function will queue an event */ ++static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) ++{ ++ u32 local_time; ++ struct timeval time; ++ TW_Event *event; ++ unsigned short aen; ++ char host[16]; ++ char *error_str; ++ ++ tw_dev->aen_count++; ++ ++ /* Fill out event info */ ++ event = tw_dev->event_queue[tw_dev->error_index]; ++ ++ host[0] = '\0'; ++ if (tw_dev->host) ++ sprintf(host, " scsi%d:", tw_dev->host->host_no); ++ ++ aen = le16_to_cpu(header->status_block.error); ++ memset(event, 0, sizeof(TW_Event)); ++ ++ event->severity = TW_SEV_OUT(header->status_block.severity__reserved); ++ do_gettimeofday(&time); ++ local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60)); ++ event->time_stamp_sec = local_time; ++ event->aen_code = aen; ++ event->retrieved = TW_AEN_NOT_RETRIEVED; ++ event->sequence_id = tw_dev->error_sequence_id; ++ tw_dev->error_sequence_id++; ++ ++ /* Check for embedded error string */ ++ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]); ++ ++ header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; ++ event->parameter_len = strlen(header->err_specific_desc); ++ memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str)); ++ if (event->severity != TW_AEN_SEVERITY_DEBUG) ++ printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", ++ host, ++ twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), ++ TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str, ++ header->err_specific_desc); ++ else ++ tw_dev->aen_count--; ++ ++ tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH; ++} /* End twl_aen_queue_event() */ ++ ++/* This function will attempt to post a command packet to the board */ ++static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) ++{ ++ dma_addr_t command_que_value; ++ ++ command_que_value = tw_dev->command_packet_phys[request_id]; ++ command_que_value += TW_COMMAND_OFFSET; ++ ++ /* First write upper 4 bytes */ ++ writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev)); ++ /* Then the lower 4 bytes */ ++ writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev)); ++ ++ tw_dev->state[request_id] = TW_S_POSTED; ++ tw_dev->posted_request_count++; ++ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) ++ tw_dev->max_posted_request_count = tw_dev->posted_request_count; ++ ++ return 0; ++} /* End twl_post_command_packet() */ ++ ++/* This function will perform a pci-dma mapping for a scatter gather list */ ++static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) ++{ ++ int 
use_sg; ++ struct scsi_cmnd *cmd = tw_dev->srb[request_id]; ++ ++ use_sg = scsi_dma_map(cmd); ++ if (!use_sg) ++ return 0; ++ else if (use_sg < 0) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); ++ return 0; ++ } ++ ++ cmd->SCp.phase = TW_PHASE_SGLIST; ++ cmd->SCp.have_data_in = use_sg; ++ ++ return use_sg; ++} /* End twl_map_scsi_sg_data() */ ++ ++/* This function hands scsi cdb's to the firmware */ ++static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) ++{ ++ TW_Command_Full *full_command_packet; ++ TW_Command_Apache *command_packet; ++ int i, sg_count; ++ struct scsi_cmnd *srb = NULL; ++ struct scatterlist *sglist = NULL, *sg; ++ int retval = 1; ++ ++ if (tw_dev->srb[request_id]) { ++ srb = tw_dev->srb[request_id]; ++ if (scsi_sglist(srb)) ++ sglist = scsi_sglist(srb); ++ } ++ ++ /* Initialize command packet */ ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ full_command_packet->header.header_desc.size_header = 128; ++ full_command_packet->header.status_block.error = 0; ++ full_command_packet->header.status_block.severity__reserved = 0; ++ ++ command_packet = &full_command_packet->command.newcommand; ++ command_packet->status = 0; ++ command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); ++ ++ /* We forced 16 byte cdb use earlier */ ++ if (!cdb) ++ memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); ++ else ++ memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); ++ ++ if (srb) { ++ command_packet->unit = srb->device->id; ++ command_packet->request_id__lunl = ++ cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id)); ++ } else { ++ command_packet->request_id__lunl = ++ cpu_to_le16(TW_REQ_LUN_IN(0, request_id)); ++ command_packet->unit = 0; ++ } ++ ++ command_packet->sgl_offset = 16; ++ ++ if (!sglistarg) { ++ /* Map sglist from scsi layer to cmd packet */ ++ if (scsi_sg_count(srb)) { ++ sg_count = twl_map_scsi_sg_data(tw_dev, request_id); ++ if (sg_count == 0) ++ goto out; ++ ++ scsi_for_each_sg(srb, sg, sg_count, i) { ++ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); ++ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg)); ++ } ++ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]))); ++ } ++ } else { ++ /* Internal cdb post */ ++ for (i = 0; i < use_sg; i++) { ++ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address); ++ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length); ++ } ++ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg)); ++ } ++ ++ /* Update some stats */ ++ if (srb) { ++ tw_dev->sector_count = scsi_bufflen(srb) / 512; ++ if (tw_dev->sector_count > tw_dev->max_sector_count) ++ tw_dev->max_sector_count = tw_dev->sector_count; ++ tw_dev->sgl_entries = scsi_sg_count(srb); ++ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) ++ tw_dev->max_sgl_entries = tw_dev->sgl_entries; ++ } ++ ++ /* Now post the command to the board */ ++ retval = twl_post_command_packet(tw_dev, request_id); ++ ++out: ++ return retval; ++} /* End twl_scsiop_execute_scsi() */ ++ ++/* This function will read the aen queue from the isr */ ++static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) ++{ ++ char cdb[TW_MAX_CDB_LEN]; ++ TW_SG_Entry_ISO sglist[1]; ++ TW_Command_Full *full_command_packet; ++ int retval = 1; ++ ++ full_command_packet = 
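The scsi_for_each_sg() loop in twl_scsiop_execute_scsi() copies each DMA-mapped segment into the firmware's scatter-gather list as an (address, length) pair. Stripped of the driver's packing details, the pattern looks like this (fw_sgl_entry is a stand-in for TW_SG_Entry_ISO):

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Stand-in for TW_SG_Entry_ISO: one (bus address, length) pair. */
struct fw_sgl_entry {
	dma_addr_t address;
	dma_addr_t length;
};

/* Copy each DMA-mapped segment of a mapped command into a firmware SGL. */
static void fill_fw_sgl(struct scsi_cmnd *cmd, struct fw_sgl_entry *sgl,
			int sg_count)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmd, sg, sg_count, i) {
		sgl[i].address = sg_dma_address(sg);
		sgl[i].length = sg_dma_len(sg);
	}
}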
tw_dev->command_packet_virt[request_id]; ++ memset(full_command_packet, 0, sizeof(TW_Command_Full)); ++ ++ /* Initialize cdb */ ++ memset(&cdb, 0, TW_MAX_CDB_LEN); ++ cdb[0] = REQUEST_SENSE; /* opcode */ ++ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ ++ ++ /* Initialize sglist */ ++ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); ++ sglist[0].length = TW_SECTOR_SIZE; ++ sglist[0].address = tw_dev->generic_buffer_phys[request_id]; ++ ++ /* Mark internal command */ ++ tw_dev->srb[request_id] = NULL; ++ ++ /* Now post the command packet */ ++ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue"); ++ goto out; ++ } ++ retval = 0; ++out: ++ return retval; ++} /* End twl_aen_read_queue() */ ++ ++/* This function will sync firmware time with the host time */ ++static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) ++{ ++ u32 schedulertime; ++ struct timeval utc; ++ TW_Command_Full *full_command_packet; ++ TW_Command *command_packet; ++ TW_Param_Apache *param; ++ u32 local_time; ++ ++ /* Fill out the command packet */ ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ memset(full_command_packet, 0, sizeof(TW_Command_Full)); ++ command_packet = &full_command_packet->command.oldcommand; ++ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); ++ command_packet->request_id = request_id; ++ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); ++ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); ++ command_packet->size = TW_COMMAND_SIZE; ++ command_packet->byte6_offset.parameter_count = cpu_to_le16(1); ++ ++ /* Setup the param */ ++ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; ++ memset(param, 0, TW_SECTOR_SIZE); ++ param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */ ++ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */ ++ param->parameter_size_bytes = cpu_to_le16(4); ++ ++ /* Convert system time in UTC to local time seconds since last ++ Sunday 12:00AM */ ++ do_gettimeofday(&utc); ++ local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60)); ++ schedulertime = local_time - (3 * 86400); ++ schedulertime = cpu_to_le32(schedulertime % 604800); ++ ++ memcpy(param->data, &schedulertime, sizeof(u32)); ++ ++ /* Mark internal command */ ++ tw_dev->srb[request_id] = NULL; ++ ++ /* Now post the command */ ++ twl_post_command_packet(tw_dev, request_id); ++} /* End twl_aen_sync_time() */ ++ ++/* This function will assign an available request id */ ++static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id) ++{ ++ *request_id = tw_dev->free_queue[tw_dev->free_head]; ++ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; ++ tw_dev->state[*request_id] = TW_S_STARTED; ++} /* End twl_get_request_id() */ ++ ++/* This function will free a request id */ ++static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id) ++{ ++ tw_dev->free_queue[tw_dev->free_tail] = request_id; ++ tw_dev->state[request_id] = TW_S_FINISHED; ++ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; ++} /* End twl_free_request_id() */ ++ ++/* This function will complete an aen request from the isr */ ++static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id) ++{ ++ TW_Command_Full *full_command_packet; ++ TW_Command *command_packet; ++ TW_Command_Apache_Header *header; ++ 
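The firmware's SchedulerTime parameter counts seconds since the preceding Sunday midnight. Because the Unix epoch (1970-01-01) fell on a Thursday, twl_aen_sync_time() subtracts three days before taking the remainder modulo one week (604800 s). A standalone worked example of the same arithmetic:

#include <stdio.h>

int main(void)
{
	/* example local timestamp (seconds since the Unix epoch) */
	unsigned int local_time = 1263340800;
	unsigned int scheduler;

	/* epoch was a Thursday: shift back 3 days so weeks start on Sunday,
	 * then reduce modulo one week (7 * 86400 = 604800 seconds) */
	scheduler = (local_time - 3 * 86400) % 604800;
	printf("seconds since last Sunday 00:00: %u\n", scheduler);
	return 0;
}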
unsigned short aen; ++ int retval = 1; ++ ++ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; ++ tw_dev->posted_request_count--; ++ aen = le16_to_cpu(header->status_block.error); ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ command_packet = &full_command_packet->command.oldcommand; ++ ++ /* First check for internal completion of set param for time sync */ ++ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { ++ /* Keep reading the queue in case there are more aen's */ ++ if (twl_aen_read_queue(tw_dev, request_id)) ++ goto out2; ++ else { ++ retval = 0; ++ goto out; ++ } ++ } ++ ++ switch (aen) { ++ case TW_AEN_QUEUE_EMPTY: ++ /* Quit reading the queue if this is the last one */ ++ break; ++ case TW_AEN_SYNC_TIME_WITH_HOST: ++ twl_aen_sync_time(tw_dev, request_id); ++ retval = 0; ++ goto out; ++ default: ++ twl_aen_queue_event(tw_dev, header); ++ ++ /* If there are more aen's, keep reading the queue */ ++ if (twl_aen_read_queue(tw_dev, request_id)) ++ goto out2; ++ else { ++ retval = 0; ++ goto out; ++ } ++ } ++ retval = 0; ++out2: ++ tw_dev->state[request_id] = TW_S_COMPLETED; ++ twl_free_request_id(tw_dev, request_id); ++ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); ++out: ++ return retval; ++} /* End twl_aen_complete() */ ++ ++/* This function will poll for a response */ ++static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) ++{ ++ unsigned long before; ++ dma_addr_t mfa; ++ u32 regh, regl; ++ u32 response; ++ int retval = 1; ++ int found = 0; ++ ++ before = jiffies; ++ ++ while (!found) { ++ if (sizeof(dma_addr_t) > 4) { ++ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev)); ++ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); ++ mfa = ((u64)regh << 32) | regl; ++ } else ++ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); ++ ++ response = (u32)mfa; ++ ++ if (TW_RESID_OUT(response) == request_id) ++ found = 1; ++ ++ if (time_after(jiffies, before + HZ * seconds)) ++ goto out; ++ ++ msleep(50); ++ } ++ retval = 0; ++out: ++ return retval; ++} /* End twl_poll_response() */ ++ ++/* This function will drain the aen queue */ ++static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) ++{ ++ int request_id = 0; ++ char cdb[TW_MAX_CDB_LEN]; ++ TW_SG_Entry_ISO sglist[1]; ++ int finished = 0, count = 0; ++ TW_Command_Full *full_command_packet; ++ TW_Command_Apache_Header *header; ++ unsigned short aen; ++ int first_reset = 0, queue = 0, retval = 1; ++ ++ if (no_check_reset) ++ first_reset = 0; ++ else ++ first_reset = 1; ++ ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ memset(full_command_packet, 0, sizeof(TW_Command_Full)); ++ ++ /* Initialize cdb */ ++ memset(&cdb, 0, TW_MAX_CDB_LEN); ++ cdb[0] = REQUEST_SENSE; /* opcode */ ++ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ ++ ++ /* Initialize sglist */ ++ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO)); ++ sglist[0].length = TW_SECTOR_SIZE; ++ sglist[0].address = tw_dev->generic_buffer_phys[request_id]; ++ ++ /* Mark internal command */ ++ tw_dev->srb[request_id] = NULL; ++ ++ do { ++ /* Send command to the board */ ++ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense"); ++ goto out; ++ } ++ ++ /* Now poll for completion */ ++ if (twl_poll_response(tw_dev, request_id, 30)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue"); ++ tw_dev->posted_request_count--; ++ goto out; ++ } ++ ++ 
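twl_poll_response() is an instance of the standard jiffies-based polling loop: sleep briefly, re-check, and compare against a deadline with time_after() so counter wraparound is handled. A generic form of the helper (check() is a hypothetical callback standing in for the MFA register read):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll check() every 50 ms until it succeeds or `seconds` elapse. */
static int poll_until(bool (*check)(void *), void *arg, int seconds)
{
	unsigned long deadline = jiffies + HZ * seconds;

	while (!check(arg)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* cf. twl_poll_response()'s retval = 1 */
		msleep(50);
	}
	return 0;
}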
tw_dev->posted_request_count--; ++ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; ++ aen = le16_to_cpu(header->status_block.error); ++ queue = 0; ++ count++; ++ ++ switch (aen) { ++ case TW_AEN_QUEUE_EMPTY: ++ if (first_reset != 1) ++ goto out; ++ else ++ finished = 1; ++ break; ++ case TW_AEN_SOFT_RESET: ++ if (first_reset == 0) ++ first_reset = 1; ++ else ++ queue = 1; ++ break; ++ case TW_AEN_SYNC_TIME_WITH_HOST: ++ break; ++ default: ++ queue = 1; ++ } ++ ++ /* Now queue an event info */ ++ if (queue) ++ twl_aen_queue_event(tw_dev, header); ++ } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); ++ ++ if (count == TW_MAX_AEN_DRAIN) ++ goto out; ++ ++ retval = 0; ++out: ++ tw_dev->state[request_id] = TW_S_INITIAL; ++ return retval; ++} /* End twl_aen_drain_queue() */ ++ ++/* This function will allocate memory and check if it is correctly aligned */ ++static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) ++{ ++ int i; ++ dma_addr_t dma_handle; ++ unsigned long *cpu_addr; ++ int retval = 1; ++ ++ cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle); ++ if (!cpu_addr) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); ++ goto out; ++ } ++ ++ memset(cpu_addr, 0, size*TW_Q_LENGTH); ++ ++ for (i = 0; i < TW_Q_LENGTH; i++) { ++ switch(which) { ++ case 0: ++ tw_dev->command_packet_phys[i] = dma_handle+(i*size); ++ tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); ++ break; ++ case 1: ++ tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); ++ tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); ++ break; ++ case 2: ++ tw_dev->sense_buffer_phys[i] = dma_handle+(i*size); ++ tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size)); ++ break; ++ } ++ } ++ retval = 0; ++out: ++ return retval; ++} /* End twl_allocate_memory() */ ++ ++/* This function will load the request id and various sgls for ioctls */ ++static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) ++{ ++ TW_Command *oldcommand; ++ TW_Command_Apache *newcommand; ++ TW_SG_Entry_ISO *sgl; ++ unsigned int pae = 0; ++ ++ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4)) ++ pae = 1; ++ ++ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { ++ newcommand = &full_command_packet->command.newcommand; ++ newcommand->request_id__lunl = ++ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); ++ if (length) { ++ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); ++ newcommand->sg_list[0].length = TW_CPU_TO_SGL(length); ++ } ++ newcommand->sgl_entries__lunh = ++ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0)); ++ } else { ++ oldcommand = &full_command_packet->command.oldcommand; ++ oldcommand->request_id = request_id; ++ ++ if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { ++ /* Load the sg list */ ++ sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0)); ++ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); ++ sgl->length = TW_CPU_TO_SGL(length); ++ oldcommand->size += pae; ++ oldcommand->size += sizeof(dma_addr_t) > 4 ? 
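twl_allocate_memory() above makes one coherent DMA allocation and carves it into TW_Q_LENGTH fixed-size slots, so slot i sits at base + i*size on both the CPU and bus side. The scheme in isolation (the names and the 256-slot count are illustrative):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/string.h>

#define SLAB_SLOTS 256	/* illustrative, cf. TW_Q_LENGTH */

struct dma_slab {
	void *virt[SLAB_SLOTS];
	dma_addr_t phys[SLAB_SLOTS];
};

/* One coherent allocation, carved into SLAB_SLOTS fixed-size slots. */
static int carve_slab(struct pci_dev *pdev, struct dma_slab *s, size_t size)
{
	dma_addr_t dma;
	void *cpu = pci_alloc_consistent(pdev, size * SLAB_SLOTS, &dma);
	int i;

	if (!cpu)
		return -ENOMEM;
	memset(cpu, 0, size * SLAB_SLOTS);
	for (i = 0; i < SLAB_SLOTS; i++) {
		s->virt[i] = (unsigned char *)cpu + i * size;
		s->phys[i] = dma + i * size;
	}
	return 0;
}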
1 : 0; ++ } ++ } ++} /* End twl_load_sgl() */ ++ ++/* This function handles ioctl for the character device ++ This interface is used by smartmontools open source software */ ++static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ long timeout; ++ unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; ++ dma_addr_t dma_handle; ++ int request_id = 0; ++ TW_Ioctl_Driver_Command driver_command; ++ TW_Ioctl_Buf_Apache *tw_ioctl; ++ TW_Command_Full *full_command_packet; ++ TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)]; ++ int retval = -EFAULT; ++ void __user *argp = (void __user *)arg; ++ ++ /* Only let one of these through at a time */ ++ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { ++ retval = -EINTR; ++ goto out; ++ } ++ ++ /* First copy down the driver command */ ++ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command))) ++ goto out2; ++ ++ /* Check data buffer size */ ++ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) { ++ retval = -EINVAL; ++ goto out2; ++ } ++ ++ /* Hardware can only do multiple of 512 byte transfers */ ++ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; ++ ++ /* Now allocate ioctl buf memory */ ++ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL); ++ if (!cpu_addr) { ++ retval = -ENOMEM; ++ goto out2; ++ } ++ ++ tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; ++ ++ /* Now copy down the entire ioctl */ ++ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1)) ++ goto out3; ++ ++ /* See which ioctl we are doing */ ++ switch (cmd) { ++ case TW_IOCTL_FIRMWARE_PASS_THROUGH: ++ spin_lock_irqsave(tw_dev->host->host_lock, flags); ++ twl_get_request_id(tw_dev, &request_id); ++ ++ /* Flag internal command */ ++ tw_dev->srb[request_id] = NULL; ++ ++ /* Flag chrdev ioctl */ ++ tw_dev->chrdev_request_id = request_id; ++ ++ full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command; ++ ++ /* Load request id and sglist for both command types */ ++ twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); ++ ++ memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); ++ ++ /* Now post the command packet to the controller */ ++ twl_post_command_packet(tw_dev, request_id); ++ spin_unlock_irqrestore(tw_dev->host->host_lock, flags); ++ ++ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; ++ ++ /* Now wait for command to complete */ ++ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); ++ ++ /* We timed out, and didn't get an interrupt */ ++ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) { ++ /* Now we need to reset the board */ ++ printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", ++ tw_dev->host->host_no, TW_DRIVER, 0x6, ++ cmd); ++ retval = -EIO; ++ twl_reset_device_extension(tw_dev, 1); ++ goto out3; ++ } ++ ++ /* Now copy in the command packet response */ ++ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); ++ ++ /* Now complete the io */ ++ spin_lock_irqsave(tw_dev->host->host_lock, flags); ++ tw_dev->posted_request_count--; ++ tw_dev->state[request_id] = TW_S_COMPLETED; ++ twl_free_request_id(tw_dev, request_id); ++ 
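The ioctl path rounds the user's buffer length up to the controller's 512-byte transfer granularity with (len + 511) & ~511. A quick demonstration of the idiom:

#include <stdio.h>

int main(void)
{
	unsigned long lens[] = { 0, 1, 511, 512, 513, 1000 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%4lu -> %4lu\n", lens[i], (lens[i] + 511) & ~511UL);
	return 0;	/* prints 0, 512, 512, 512, 1024, 1024 */
}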
spin_unlock_irqrestore(tw_dev->host->host_lock, flags); ++ break; ++ default: ++ retval = -ENOTTY; ++ goto out3; ++ } ++ ++ /* Now copy the entire response to userspace */ ++ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0) ++ retval = 0; ++out3: ++ /* Now free ioctl buf memory */ ++ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle); ++out2: ++ mutex_unlock(&tw_dev->ioctl_lock); ++out: ++ return retval; ++} /* End twl_chrdev_ioctl() */ ++ ++/* This function handles open for the character device */ ++static int twl_chrdev_open(struct inode *inode, struct file *file) ++{ ++ unsigned int minor_number; ++ int retval = -ENODEV; ++ ++ if (!capable(CAP_SYS_ADMIN)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ cycle_kernel_lock(); ++ minor_number = iminor(inode); ++ if (minor_number >= twl_device_extension_count) ++ goto out; ++ retval = 0; ++out: ++ return retval; ++} /* End twl_chrdev_open() */ ++ ++/* File operations struct for character device */ ++static const struct file_operations twl_fops = { ++ .owner = THIS_MODULE, ++ .ioctl = twl_chrdev_ioctl, ++ .open = twl_chrdev_open, ++ .release = NULL ++}; ++ ++/* This function passes sense data from firmware to scsi layer */ ++static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host) ++{ ++ TW_Command_Apache_Header *header; ++ TW_Command_Full *full_command_packet; ++ unsigned short error; ++ char *error_str; ++ int retval = 1; ++ ++ header = tw_dev->sense_buffer_virt[i]; ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ ++ /* Get embedded firmware error string */ ++ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]); ++ ++ /* Don't print error for Logical unit not supported during rollcall */ ++ error = le16_to_cpu(header->status_block.error); ++ if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) { ++ if (print_host) ++ printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", ++ tw_dev->host->host_no, ++ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, ++ header->status_block.error, ++ error_str, ++ header->err_specific_desc); ++ else ++ printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n", ++ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, ++ header->status_block.error, ++ error_str, ++ header->err_specific_desc); ++ } ++ ++ if (copy_sense) { ++ memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH); ++ tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); ++ goto out; ++ } ++out: ++ return retval; ++} /* End twl_fill_sense() */ ++ ++/* This function will free up device extension resources */ ++static void twl_free_device_extension(TW_Device_Extension *tw_dev) ++{ ++ if (tw_dev->command_packet_virt[0]) ++ pci_free_consistent(tw_dev->tw_pci_dev, ++ sizeof(TW_Command_Full)*TW_Q_LENGTH, ++ tw_dev->command_packet_virt[0], ++ tw_dev->command_packet_phys[0]); ++ ++ if (tw_dev->generic_buffer_virt[0]) ++ pci_free_consistent(tw_dev->tw_pci_dev, ++ TW_SECTOR_SIZE*TW_Q_LENGTH, ++ tw_dev->generic_buffer_virt[0], ++ tw_dev->generic_buffer_phys[0]); ++ ++ if (tw_dev->sense_buffer_virt[0]) ++ pci_free_consistent(tw_dev->tw_pci_dev, ++ sizeof(TW_Command_Apache_Header)* ++ TW_Q_LENGTH, ++ tw_dev->sense_buffer_virt[0], ++ tw_dev->sense_buffer_phys[0]); ++ ++ kfree(tw_dev->event_queue[0]); 
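twl_fill_sense(), like twl_aen_queue_event() earlier, extracts two back-to-back NUL-terminated strings from err_specific_desc[]: the specific description first, then the generic error string one byte past the first NUL. A self-contained illustration of that layout:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* two NUL-terminated strings packed back to back, as the
	 * firmware does in err_specific_desc[] */
	char desc[32] = "unit 3 degraded\0Drive error";
	const char *error_str = &desc[strlen(desc) + 1];

	printf("%s: %s\n", error_str, desc);	/* "Drive error: unit 3 degraded" */
	return 0;
}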
++} /* End twl_free_device_extension() */ ++ ++/* This function will get parameter table entries from the firmware */ ++static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) ++{ ++ TW_Command_Full *full_command_packet; ++ TW_Command *command_packet; ++ TW_Param_Apache *param; ++ void *retval = NULL; ++ ++ /* Setup the command packet */ ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ memset(full_command_packet, 0, sizeof(TW_Command_Full)); ++ command_packet = &full_command_packet->command.oldcommand; ++ ++ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); ++ command_packet->size = TW_COMMAND_SIZE; ++ command_packet->request_id = request_id; ++ command_packet->byte6_offset.block_count = cpu_to_le16(1); ++ ++ /* Now setup the param */ ++ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; ++ memset(param, 0, TW_SECTOR_SIZE); ++ param->table_id = cpu_to_le16(table_id | 0x8000); ++ param->parameter_id = cpu_to_le16(parameter_id); ++ param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); ++ ++ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); ++ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE); ++ ++ /* Post the command packet to the board */ ++ twl_post_command_packet(tw_dev, request_id); ++ ++ /* Poll for completion */ ++ if (twl_poll_response(tw_dev, request_id, 30)) ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param") ++ else ++ retval = (void *)&(param->data[0]); ++ ++ tw_dev->posted_request_count--; ++ tw_dev->state[request_id] = TW_S_INITIAL; ++ ++ return retval; ++} /* End twl_get_param() */ ++ ++/* This function will send an initconnection command to controller */ ++static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits, ++ u32 set_features, unsigned short current_fw_srl, ++ unsigned short current_fw_arch_id, ++ unsigned short current_fw_branch, ++ unsigned short current_fw_build, ++ unsigned short *fw_on_ctlr_srl, ++ unsigned short *fw_on_ctlr_arch_id, ++ unsigned short *fw_on_ctlr_branch, ++ unsigned short *fw_on_ctlr_build, ++ u32 *init_connect_result) ++{ ++ TW_Command_Full *full_command_packet; ++ TW_Initconnect *tw_initconnect; ++ int request_id = 0, retval = 1; ++ ++ /* Initialize InitConnection command packet */ ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ memset(full_command_packet, 0, sizeof(TW_Command_Full)); ++ full_command_packet->header.header_desc.size_header = 128; ++ ++ tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; ++ tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); ++ tw_initconnect->request_id = request_id; ++ tw_initconnect->message_credits = cpu_to_le16(message_credits); ++ tw_initconnect->features = set_features; ++ ++ /* Turn on 64-bit sgl support if we need to */ ++ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 
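twl_get_param() below returns a pointer into the request's generic buffer (valid only until that buffer is reused), or NULL if its internal poll times out. A usage sketch mirroring the calls made later in twl_probe(); the wrapper function itself is hypothetical:

/* Hypothetical wrapper showing the calling convention. */
static void report_fw_version(TW_Device_Extension *tw_dev)
{
	char *fw_ver = twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH);

	if (fw_ver)
		printk(KERN_INFO "3w-sas: firmware %.16s\n", fw_ver);
	/* NULL means the poll inside twl_get_param() timed out */
}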
1 : 0; ++ ++ tw_initconnect->features = cpu_to_le32(tw_initconnect->features); ++ ++ if (set_features & TW_EXTENDED_INIT_CONNECT) { ++ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; ++ tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl); ++ tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id); ++ tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch); ++ tw_initconnect->fw_build = cpu_to_le16(current_fw_build); ++ } else ++ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; ++ ++ /* Send command packet to the board */ ++ twl_post_command_packet(tw_dev, request_id); ++ ++ /* Poll for completion */ ++ if (twl_poll_response(tw_dev, request_id, 30)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection"); ++ } else { ++ if (set_features & TW_EXTENDED_INIT_CONNECT) { ++ *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl); ++ *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id); ++ *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch); ++ *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build); ++ *init_connect_result = le32_to_cpu(tw_initconnect->result); ++ } ++ retval = 0; ++ } ++ ++ tw_dev->posted_request_count--; ++ tw_dev->state[request_id] = TW_S_INITIAL; ++ ++ return retval; ++} /* End twl_initconnection() */ ++ ++/* This function will initialize the fields of a device extension */ ++static int twl_initialize_device_extension(TW_Device_Extension *tw_dev) ++{ ++ int i, retval = 1; ++ ++ /* Initialize command packet buffers */ ++ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed"); ++ goto out; ++ } ++ ++ /* Initialize generic buffer */ ++ if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed"); ++ goto out; ++ } ++ ++ /* Allocate sense buffers */ ++ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed"); ++ goto out; ++ } ++ ++ /* Allocate event info space */ ++ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL); ++ if (!tw_dev->event_queue[0]) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed"); ++ goto out; ++ } ++ ++ for (i = 0; i < TW_Q_LENGTH; i++) { ++ tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); ++ tw_dev->free_queue[i] = i; ++ tw_dev->state[i] = TW_S_INITIAL; ++ } ++ ++ tw_dev->free_head = TW_Q_START; ++ tw_dev->free_tail = TW_Q_START; ++ tw_dev->error_sequence_id = 1; ++ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; ++ ++ mutex_init(&tw_dev->ioctl_lock); ++ init_waitqueue_head(&tw_dev->ioctl_wqueue); ++ ++ retval = 0; ++out: ++ return retval; ++} /* End twl_initialize_device_extension() */ ++ ++/* This function will perform a pci-dma unmap */ ++static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) ++{ ++ struct scsi_cmnd *cmd = tw_dev->srb[request_id]; ++ ++ if (cmd->SCp.phase == TW_PHASE_SGLIST) ++ scsi_dma_unmap(cmd); ++} /* End twl_unmap_scsi_data() */ ++ ++/* This function will handle attention interrupts */ ++static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) ++{ ++ int retval = 1; ++ u32 request_id, doorbell; ++ ++ /* Read doorbell status */ ++ doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev)); ++ ++ /* Check for controller errors */ ++ if (doorbell & 
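twl_initialize_device_extension() seeds the request-id free list as a ring: ids are popped at free_head and recycled at free_tail, both advancing modulo TW_Q_LENGTH, and since at most TW_Q_LENGTH ids exist the head can never overrun the tail. A userspace model of the same structure:

#include <stdio.h>

#define Q_LEN 256	/* cf. TW_Q_LENGTH */

static int free_queue[Q_LEN];
static int head, tail;	/* both start at 0, cf. TW_Q_START */

static int get_id(void)
{
	int id = free_queue[head];

	head = (head + 1) % Q_LEN;
	return id;
}

static void put_id(int id)
{
	free_queue[tail] = id;
	tail = (tail + 1) % Q_LEN;
}

int main(void)
{
	int i, a, b;

	for (i = 0; i < Q_LEN; i++)
		free_queue[i] = i;
	a = get_id();
	b = get_id();
	put_id(a);
	printf("got %d,%d; recycled %d; next %d\n", a, b, a, get_id());
	return 0;
}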
TWL_DOORBELL_CONTROLLER_ERROR) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing"); ++ goto out; ++ } ++ ++ /* Check if we need to perform an AEN drain */ ++ if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) { ++ if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) { ++ twl_get_request_id(tw_dev, &request_id); ++ if (twl_aen_read_queue(tw_dev, request_id)) { ++ tw_dev->state[request_id] = TW_S_COMPLETED; ++ twl_free_request_id(tw_dev, request_id); ++ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); ++ } ++ } ++ } ++ ++ retval = 0; ++out: ++ /* Clear doorbell interrupt */ ++ TWL_CLEAR_DB_INTERRUPT(tw_dev); ++ ++ /* Make sure the clear was flushed by reading it back */ ++ readl(TWL_HOBDBC_REG_ADDR(tw_dev)); ++ ++ return retval; ++} /* End twl_handle_attention_interrupt() */ ++ ++/* Interrupt service routine */ ++static irqreturn_t twl_interrupt(int irq, void *dev_instance) ++{ ++ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; ++ int i, handled = 0, error = 0; ++ dma_addr_t mfa = 0; ++ u32 reg, regl, regh, response, request_id = 0; ++ struct scsi_cmnd *cmd; ++ TW_Command_Full *full_command_packet; ++ ++ spin_lock(tw_dev->host->host_lock); ++ ++ /* Read host interrupt status */ ++ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev)); ++ ++ /* Check if this is our interrupt, otherwise bail */ ++ if (!(reg & TWL_HISTATUS_VALID_INTERRUPT)) ++ goto twl_interrupt_bail; ++ ++ handled = 1; ++ ++ /* If we are resetting, bail */ ++ if (test_bit(TW_IN_RESET, &tw_dev->flags)) ++ goto twl_interrupt_bail; ++ ++ /* Attention interrupt */ ++ if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) { ++ if (twl_handle_attention_interrupt(tw_dev)) { ++ TWL_MASK_INTERRUPTS(tw_dev); ++ goto twl_interrupt_bail; ++ } ++ } ++ ++ /* Response interrupt */ ++ while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) { ++ if (sizeof(dma_addr_t) > 4) { ++ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev)); ++ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); ++ mfa = ((u64)regh << 32) | regl; ++ } else ++ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev)); ++ ++ error = 0; ++ response = (u32)mfa; ++ ++ /* Check for command packet error */ ++ if (!TW_NOTMFA_OUT(response)) { ++ for (i=0;i<TW_Q_LENGTH;i++) { ++ if (tw_dev->sense_buffer_phys[i] == mfa) { ++ request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id); ++ if (tw_dev->srb[request_id] != NULL) ++ error = twl_fill_sense(tw_dev, i, request_id, 1, 1); ++ else { ++ /* Skip ioctl error prints */ ++ if (request_id != tw_dev->chrdev_request_id) ++ error = twl_fill_sense(tw_dev, i, request_id, 0, 1); ++ else ++ memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header)); ++ } ++ ++ /* Now re-post the sense buffer */ ++ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev)); ++ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev)); ++ break; ++ } ++ } ++ } else ++ request_id = TW_RESID_OUT(response); ++ ++ full_command_packet = tw_dev->command_packet_virt[request_id]; ++ ++ /* Check for correct state */ ++ if (tw_dev->state[request_id] != TW_S_POSTED) { ++ if (tw_dev->srb[request_id] != NULL) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted"); ++ TWL_MASK_INTERRUPTS(tw_dev); ++ goto twl_interrupt_bail; ++ } ++ } ++ ++ /* Check for internal command completion */ ++ if (tw_dev->srb[request_id] == NULL) { ++ if (request_id != tw_dev->chrdev_request_id) { ++ if (twl_aen_complete(tw_dev, request_id)) ++ TW_PRINTK(tw_dev->host, 
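Note how twl_handle_attention_interrupt() follows the doorbell-clear writel() with a readl() of the same register: PCI writes are posted, so the read-back forces the clear to actually reach the device before the handler returns. The idiom in generic form:

#include <linux/io.h>
#include <linux/types.h>

/* Write a device register and force the posted write to complete. */
static inline void write_and_flush(void __iomem *reg, u32 val)
{
	writel(val, reg);
	(void)readl(reg);	/* read-back flushes the posted write */
}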
TW_DRIVER, 0xf, "Error completing AEN during attention interrupt"); ++ } else { ++ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; ++ wake_up(&tw_dev->ioctl_wqueue); ++ } ++ } else { ++ cmd = tw_dev->srb[request_id]; ++ ++ if (!error) ++ cmd->result = (DID_OK << 16); ++ ++ /* Report residual bytes for single sgl */ ++ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) { ++ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id])) ++ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length); ++ } ++ ++ /* Now complete the io */ ++ tw_dev->state[request_id] = TW_S_COMPLETED; ++ twl_free_request_id(tw_dev, request_id); ++ tw_dev->posted_request_count--; ++ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); ++ twl_unmap_scsi_data(tw_dev, request_id); ++ } ++ ++ /* Check for another response interrupt */ ++ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev)); ++ } ++ ++twl_interrupt_bail: ++ spin_unlock(tw_dev->host->host_lock); ++ return IRQ_RETVAL(handled); ++} /* End twl_interrupt() */ ++ ++/* This function will poll for a register change */ ++static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds) ++{ ++ unsigned long before; ++ int retval = 1; ++ u32 reg_value; ++ ++ reg_value = readl(reg); ++ before = jiffies; ++ ++ while ((reg_value & value) != result) { ++ reg_value = readl(reg); ++ if (time_after(jiffies, before + HZ * seconds)) ++ goto out; ++ msleep(50); ++ } ++ retval = 0; ++out: ++ return retval; ++} /* End twl_poll_register() */ ++ ++/* This function will reset a controller */ ++static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) ++{ ++ int retval = 1; ++ int i = 0; ++ u32 status = 0; ++ unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0; ++ unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0; ++ u32 init_connect_result = 0; ++ int tries = 0; ++ int do_soft_reset = soft_reset; ++ ++ while (tries < TW_MAX_RESET_TRIES) { ++ /* Do a soft reset if one is needed */ ++ if (do_soft_reset) { ++ TWL_SOFT_RESET(tw_dev); ++ ++ /* Make sure controller is in a good state */ ++ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence"); ++ tries++; ++ continue; ++ } ++ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence"); ++ tries++; ++ continue; ++ } ++ } ++ ++ /* Initconnect */ ++ if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, ++ TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL, ++ TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH, ++ TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl, ++ &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, ++ &fw_on_ctlr_build, &init_connect_result)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL"); ++ do_soft_reset = 1; ++ tries++; ++ continue; ++ } ++ ++ /* Load sense buffers */ ++ while (i < TW_Q_LENGTH) { ++ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev)); ++ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev)); ++ ++ /* Check status for over-run after each write */ ++ status = readl(TWL_STATUS_REG_ADDR(tw_dev)); ++ if (!(status & TWL_STATUS_OVERRUN_SUBMIT)) ++ i++; ++ } ++ ++ /* Now check status */ 
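twl_interrupt() is registered with IRQF_SHARED, so it must first decide whether the interrupt is its own and return IRQ_NONE otherwise; IRQ_RETVAL(handled) encodes exactly that. A skeleton of the discipline (the device struct, helper register field, and status bits below are placeholders, not the driver's definitions):

#include <linux/interrupt.h>
#include <linux/io.h>

#define SAMPLE_VALID_BITS 0xC	/* placeholder, cf. TWL_HISTATUS_VALID_INTERRUPT */

struct sample_dev {
	void __iomem *histat;	/* interrupt status register */
};

static irqreturn_t sample_isr(int irq, void *dev_instance)
{
	struct sample_dev *dev = dev_instance;
	u32 status = readl(dev->histat);

	if (!(status & SAMPLE_VALID_BITS))
		return IRQ_NONE;	/* shared line: not our interrupt */

	/* ... acknowledge and service the interrupt ... */
	return IRQ_HANDLED;
}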
++ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
++ if (status) {
++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
++ do_soft_reset = 1;
++ tries++;
++ continue;
++ }
++
++ /* Drain the AEN queue */
++ if (twl_aen_drain_queue(tw_dev, soft_reset)) {
++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
++ do_soft_reset = 1;
++ tries++;
++ continue;
++ }
++
++ /* Load rest of compatibility struct */
++ strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
++ tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
++ tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
++ tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
++ tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
++ tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
++ tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
++ tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
++ tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
++ tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
++
++ /* If we got here, controller is in a good state */
++ retval = 0;
++ goto out;
++ }
++out:
++ return retval;
++} /* End twl_reset_sequence() */
++
++/* This function will reset a device extension */
++static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
++{
++ int i = 0, retval = 1;
++ unsigned long flags = 0;
++
++ /* Block SCSI requests while we are resetting */
++ if (ioctl_reset)
++ scsi_block_requests(tw_dev->host);
++
++ set_bit(TW_IN_RESET, &tw_dev->flags);
++ TWL_MASK_INTERRUPTS(tw_dev);
++ TWL_CLEAR_DB_INTERRUPT(tw_dev);
++
++ spin_lock_irqsave(tw_dev->host->host_lock, flags);
++
++ /* Abort all requests that are in progress */
++ for (i = 0; i < TW_Q_LENGTH; i++) {
++ if ((tw_dev->state[i] != TW_S_FINISHED) &&
++ (tw_dev->state[i] != TW_S_INITIAL) &&
++ (tw_dev->state[i] != TW_S_COMPLETED)) {
++ if (tw_dev->srb[i]) {
++ tw_dev->srb[i]->result = (DID_RESET << 16);
++ tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
++ twl_unmap_scsi_data(tw_dev, i);
++ }
++ }
++ }
++
++ /* Reset queues and counts */
++ for (i = 0; i < TW_Q_LENGTH; i++) {
++ tw_dev->free_queue[i] = i;
++ tw_dev->state[i] = TW_S_INITIAL;
++ }
++ tw_dev->free_head = TW_Q_START;
++ tw_dev->free_tail = TW_Q_START;
++ tw_dev->posted_request_count = 0;
++
++ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
++
++ if (twl_reset_sequence(tw_dev, 1))
++ goto out;
++
++ TWL_UNMASK_INTERRUPTS(tw_dev);
++
++ clear_bit(TW_IN_RESET, &tw_dev->flags);
++ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
++
++ retval = 0;
++out:
++ if (ioctl_reset)
++ scsi_unblock_requests(tw_dev->host);
++ return retval;
++} /* End twl_reset_device_extension() */
++
++/* This function returns unit geometry in cylinders/heads/sectors */
++static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
++{
++ int heads, sectors, cylinders;
++ TW_Device_Extension *tw_dev;
++
++ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
++
++ if (capacity >= 0x200000) {
++ heads = 255;
++ sectors = 63;
++ cylinders = sector_div(capacity, heads * sectors);
++ } else {
++ heads = 64;
++ sectors = 32;
++ cylinders = sector_div(capacity, heads * sectors);
++ }
++
++ geom[0] = heads;
++ geom[1] = sectors;
++ geom[2] = cylinders;
++
++ return 0;
++} /* End twl_scsi_biosparam() */
++
++/* This is the new scsi eh reset function */
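For units of 1 GiB (0x200000 sectors) and larger, twl_scsi_biosparam() reports a 255-head, 63-sector translation; in the kernel, sector_div() divides its sector_t argument in place. A userspace model of the conventional translation for a 2 TiB unit:

#include <stdio.h>

int main(void)
{
	/* 2 TiB unit expressed in 512-byte sectors */
	unsigned long long capacity = 4294967296ULL;
	int heads = 255, sectors = 63;
	unsigned long long cylinders = capacity / (heads * sectors);

	printf("C/H/S = %llu/%d/%d\n", cylinders, heads, sectors);	/* 267349/255/63 */
	return 0;
}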
++static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt) ++{ ++ TW_Device_Extension *tw_dev = NULL; ++ int retval = FAILED; ++ ++ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; ++ ++ tw_dev->num_resets++; ++ ++ sdev_printk(KERN_WARNING, SCpnt->device, ++ "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n", ++ TW_DRIVER, 0x2c, SCpnt->cmnd[0]); ++ ++ /* Make sure we are not issuing an ioctl or resetting from ioctl */ ++ mutex_lock(&tw_dev->ioctl_lock); ++ ++ /* Now reset the card and some of the device extension data */ ++ if (twl_reset_device_extension(tw_dev, 0)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset"); ++ goto out; ++ } ++ ++ retval = SUCCESS; ++out: ++ mutex_unlock(&tw_dev->ioctl_lock); ++ return retval; ++} /* End twl_scsi_eh_reset() */ ++ ++/* This is the main scsi queue function to handle scsi opcodes */ ++static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) ++{ ++ int request_id, retval; ++ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; ++ ++ /* If we are resetting due to timed out ioctl, report as busy */ ++ if (test_bit(TW_IN_RESET, &tw_dev->flags)) { ++ retval = SCSI_MLQUEUE_HOST_BUSY; ++ goto out; ++ } ++ ++ /* Save done function into scsi_cmnd struct */ ++ SCpnt->scsi_done = done; ++ ++ /* Get a free request id */ ++ twl_get_request_id(tw_dev, &request_id); ++ ++ /* Save the scsi command for use by the ISR */ ++ tw_dev->srb[request_id] = SCpnt; ++ ++ /* Initialize phase to zero */ ++ SCpnt->SCp.phase = TW_PHASE_INITIAL; ++ ++ retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); ++ if (retval) { ++ tw_dev->state[request_id] = TW_S_COMPLETED; ++ twl_free_request_id(tw_dev, request_id); ++ SCpnt->result = (DID_ERROR << 16); ++ done(SCpnt); ++ retval = 0; ++ } ++out: ++ return retval; ++} /* End twl_scsi_queue() */ ++ ++/* This function tells the controller to shut down */ ++static void __twl_shutdown(TW_Device_Extension *tw_dev) ++{ ++ /* Disable interrupts */ ++ TWL_MASK_INTERRUPTS(tw_dev); ++ ++ /* Free up the IRQ */ ++ free_irq(tw_dev->tw_pci_dev->irq, tw_dev); ++ ++ printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no); ++ ++ /* Tell the card we are shutting down */ ++ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed"); ++ } else { ++ printk(KERN_WARNING "3w-sas: Shutdown complete.\n"); ++ } ++ ++ /* Clear doorbell interrupt just before exit */ ++ TWL_CLEAR_DB_INTERRUPT(tw_dev); ++} /* End __twl_shutdown() */ ++ ++/* Wrapper for __twl_shutdown */ ++static void twl_shutdown(struct pci_dev *pdev) ++{ ++ struct Scsi_Host *host = pci_get_drvdata(pdev); ++ TW_Device_Extension *tw_dev; ++ ++ if (!host) ++ return; ++ ++ tw_dev = (TW_Device_Extension *)host->hostdata; ++ ++ if (tw_dev->online) ++ __twl_shutdown(tw_dev); ++} /* End twl_shutdown() */ ++ ++/* This function configures unit settings when a unit is coming on-line */ ++static int twl_slave_configure(struct scsi_device *sdev) ++{ ++ /* Force 60 second timeout */ ++ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); ++ ++ return 0; ++} /* End twl_slave_configure() */ ++ ++/* scsi_host_template initializer */ ++static struct scsi_host_template driver_template = { ++ .module = THIS_MODULE, ++ .name = "3w-sas", ++ .queuecommand = twl_scsi_queue, ++ .eh_host_reset_handler = twl_scsi_eh_reset, ++ .bios_param = twl_scsi_biosparam, 
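twl_scsi_queue() fails a command by packing a host-byte code into bits 16-23 of scsi_cmnd.result, e.g. (DID_ERROR << 16), and the reset path does the same with DID_RESET. The encoding in isolation (constants copied from scsi/scsi.h):

#include <stdio.h>

/* host-byte codes as defined in scsi/scsi.h */
#define DID_OK    0x00
#define DID_ERROR 0x07

int main(void)
{
	int result = DID_ERROR << 16;

	printf("result 0x%08x, host byte 0x%02x\n",
	       result, (result >> 16) & 0xff);
	return 0;
}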
++ .change_queue_depth = twl_change_queue_depth, ++ .can_queue = TW_Q_LENGTH-2, ++ .slave_configure = twl_slave_configure, ++ .this_id = -1, ++ .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH, ++ .max_sectors = TW_MAX_SECTORS, ++ .cmd_per_lun = TW_MAX_CMDS_PER_LUN, ++ .use_clustering = ENABLE_CLUSTERING, ++ .shost_attrs = twl_host_attrs, ++ .emulated = 1 ++}; ++ ++/* This function will probe and initialize a card */ ++static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) ++{ ++ struct Scsi_Host *host = NULL; ++ TW_Device_Extension *tw_dev; ++ resource_size_t mem_addr, mem_len; ++ int retval = -ENODEV; ++ int *ptr_phycount, phycount=0; ++ ++ retval = pci_enable_device(pdev); ++ if (retval) { ++ TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device"); ++ goto out_disable_device; ++ } ++ ++ pci_set_master(pdev); ++ pci_try_set_mwi(pdev); ++ ++ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ++ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) ++ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ++ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { ++ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask"); ++ retval = -ENODEV; ++ goto out_disable_device; ++ } ++ ++ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); ++ if (!host) { ++ TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension"); ++ retval = -ENOMEM; ++ goto out_disable_device; ++ } ++ tw_dev = (TW_Device_Extension *)host->hostdata; ++ ++ /* Save values to device extension */ ++ tw_dev->host = host; ++ tw_dev->tw_pci_dev = pdev; ++ ++ if (twl_initialize_device_extension(tw_dev)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension"); ++ goto out_free_device_extension; ++ } ++ ++ /* Request IO regions */ ++ retval = pci_request_regions(pdev, "3w-sas"); ++ if (retval) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region"); ++ goto out_free_device_extension; ++ } ++ ++ /* Use region 1 */ ++ mem_addr = pci_resource_start(pdev, 1); ++ mem_len = pci_resource_len(pdev, 1); ++ ++ /* Save base address */ ++ tw_dev->base_addr = ioremap(mem_addr, mem_len); ++ ++ if (!tw_dev->base_addr) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap"); ++ goto out_release_mem_region; ++ } ++ ++ /* Disable interrupts on the card */ ++ TWL_MASK_INTERRUPTS(tw_dev); ++ ++ /* Initialize the card */ ++ if (twl_reset_sequence(tw_dev, 0)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe"); ++ goto out_iounmap; ++ } ++ ++ /* Set host specific parameters */ ++ host->max_id = TW_MAX_UNITS; ++ host->max_cmd_len = TW_MAX_CDB_LEN; ++ host->max_lun = TW_MAX_LUNS; ++ host->max_channel = 0; ++ ++ /* Register the card with the kernel SCSI layer */ ++ retval = scsi_add_host(host, &pdev->dev); ++ if (retval) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed"); ++ goto out_iounmap; ++ } ++ ++ pci_set_drvdata(pdev, host); ++ ++ printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n", ++ host->host_no, ++ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE, ++ TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH), ++ (u64)mem_addr, pdev->irq); ++ ++ ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE, ++ TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH); ++ if (ptr_phycount) ++ phycount = le32_to_cpu(*(int *)ptr_phycount); ++ ++ printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n", ++ host->host_no, ++ (char 
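The probe's DMA-mask dance is the stock pattern: ask for 64-bit streaming and coherent masks, and fall back to 32-bit if either is refused. Condensed into a helper (a sketch of the same logic, not the driver's exact code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try 64-bit streaming+coherent masks, fall back to 32-bit. */
static int set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit addressing */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* 32-bit fallback */
	return -ENODEV;
}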
*)twl_get_param(tw_dev, 1, TW_VERSION_TABLE, ++ TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), ++ (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE, ++ TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), ++ phycount); ++ ++ /* Try to enable MSI */ ++ if (use_msi && !pci_enable_msi(pdev)) ++ set_bit(TW_USING_MSI, &tw_dev->flags); ++ ++ /* Now setup the interrupt handler */ ++ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev); ++ if (retval) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ"); ++ goto out_remove_host; ++ } ++ ++ twl_device_extension_list[twl_device_extension_count] = tw_dev; ++ twl_device_extension_count++; ++ ++ /* Re-enable interrupts on the card */ ++ TWL_UNMASK_INTERRUPTS(tw_dev); ++ ++ /* Finally, scan the host */ ++ scsi_scan_host(host); ++ ++ /* Add sysfs binary files */ ++ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr)) ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read"); ++ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr)) ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info"); ++ ++ if (twl_major == -1) { ++ if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0) ++ TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device"); ++ } ++ tw_dev->online = 1; ++ return 0; ++ ++out_remove_host: ++ if (test_bit(TW_USING_MSI, &tw_dev->flags)) ++ pci_disable_msi(pdev); ++ scsi_remove_host(host); ++out_iounmap: ++ iounmap(tw_dev->base_addr); ++out_release_mem_region: ++ pci_release_regions(pdev); ++out_free_device_extension: ++ twl_free_device_extension(tw_dev); ++ scsi_host_put(host); ++out_disable_device: ++ pci_disable_device(pdev); ++ ++ return retval; ++} /* End twl_probe() */ ++ ++/* This function is called to remove a device */ ++static void twl_remove(struct pci_dev *pdev) ++{ ++ struct Scsi_Host *host = pci_get_drvdata(pdev); ++ TW_Device_Extension *tw_dev; ++ ++ if (!host) ++ return; ++ ++ tw_dev = (TW_Device_Extension *)host->hostdata; ++ ++ if (!tw_dev->online) ++ return; ++ ++ /* Remove sysfs binary files */ ++ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr); ++ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr); ++ ++ scsi_remove_host(tw_dev->host); ++ ++ /* Unregister character device */ ++ if (twl_major >= 0) { ++ unregister_chrdev(twl_major, "twl"); ++ twl_major = -1; ++ } ++ ++ /* Shutdown the card */ ++ __twl_shutdown(tw_dev); ++ ++ /* Disable MSI if enabled */ ++ if (test_bit(TW_USING_MSI, &tw_dev->flags)) ++ pci_disable_msi(pdev); ++ ++ /* Free IO remapping */ ++ iounmap(tw_dev->base_addr); ++ ++ /* Free up the mem region */ ++ pci_release_regions(pdev); ++ ++ /* Free up device extension resources */ ++ twl_free_device_extension(tw_dev); ++ ++ scsi_host_put(tw_dev->host); ++ pci_disable_device(pdev); ++ twl_device_extension_count--; ++} /* End twl_remove() */ ++ ++#ifdef CONFIG_PM ++/* This function is called on PCI suspend */ ++static int twl_suspend(struct pci_dev *pdev, pm_message_t state) ++{ ++ struct Scsi_Host *host = pci_get_drvdata(pdev); ++ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; ++ ++ printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no); ++ /* Disable interrupts */ ++ TWL_MASK_INTERRUPTS(tw_dev); ++ ++ free_irq(tw_dev->tw_pci_dev->irq, tw_dev); ++ ++ /* Tell the card we are shutting down */ ++ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, 
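twl_probe() unwinds errors through the kernel's usual goto ladder, where each out_* label releases exactly what was acquired before the jump, in reverse order. A schematic version with hypothetical acquire/release steps:

/* Hypothetical acquire/release steps standing in for the probe's
 * pci_request_regions()/ioremap()/scsi_add_host() sequence. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }

static int sample_probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto out_release_a;
	return 0;	/* success: keep everything */

out_release_a:
	release_a();	/* undo only what succeeded, in reverse order */
out:
	return err;
}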
NULL, NULL, NULL)) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend"); ++ } else { ++ printk(KERN_WARNING "3w-sas: Suspend complete.\n"); ++ } ++ ++ /* Clear doorbell interrupt */ ++ TWL_CLEAR_DB_INTERRUPT(tw_dev); ++ ++ pci_save_state(pdev); ++ pci_disable_device(pdev); ++ pci_set_power_state(pdev, pci_choose_state(pdev, state)); ++ ++ return 0; ++} /* End twl_suspend() */ ++ ++/* This function is called on PCI resume */ ++static int twl_resume(struct pci_dev *pdev) ++{ ++ int retval = 0; ++ struct Scsi_Host *host = pci_get_drvdata(pdev); ++ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; ++ ++ printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no); ++ pci_set_power_state(pdev, PCI_D0); ++ pci_enable_wake(pdev, PCI_D0, 0); ++ pci_restore_state(pdev); ++ ++ retval = pci_enable_device(pdev); ++ if (retval) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume"); ++ return retval; ++ } ++ ++ pci_set_master(pdev); ++ pci_try_set_mwi(pdev); ++ ++ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ++ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) ++ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ++ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { ++ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume"); ++ retval = -ENODEV; ++ goto out_disable_device; ++ } ++ ++ /* Initialize the card */ ++ if (twl_reset_sequence(tw_dev, 0)) { ++ retval = -ENODEV; ++ goto out_disable_device; ++ } ++ ++ /* Now setup the interrupt handler */ ++ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev); ++ if (retval) { ++ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume"); ++ retval = -ENODEV; ++ goto out_disable_device; ++ } ++ ++ /* Now enable MSI if enabled */ ++ if (test_bit(TW_USING_MSI, &tw_dev->flags)) ++ pci_enable_msi(pdev); ++ ++ /* Re-enable interrupts on the card */ ++ TWL_UNMASK_INTERRUPTS(tw_dev); ++ ++ printk(KERN_WARNING "3w-sas: Resume complete.\n"); ++ return 0; ++ ++out_disable_device: ++ scsi_remove_host(host); ++ pci_disable_device(pdev); ++ ++ return retval; ++} /* End twl_resume() */ ++#endif ++ ++/* PCI Devices supported by this driver */ ++static struct pci_device_id twl_pci_tbl[] __devinitdata = { ++ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9750, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, ++ { } ++}; ++MODULE_DEVICE_TABLE(pci, twl_pci_tbl); ++ ++/* pci_driver initializer */ ++static struct pci_driver twl_driver = { ++ .name = "3w-sas", ++ .id_table = twl_pci_tbl, ++ .probe = twl_probe, ++ .remove = twl_remove, ++#ifdef CONFIG_PM ++ .suspend = twl_suspend, ++ .resume = twl_resume, ++#endif ++ .shutdown = twl_shutdown ++}; ++ ++/* This function is called on driver initialization */ ++static int __init twl_init(void) ++{ ++ printk(KERN_WARNING "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION); ++ ++ return pci_register_driver(&twl_driver); ++} /* End twl_init() */ ++ ++/* This function is called on driver exit */ ++static void __exit twl_exit(void) ++{ ++ pci_unregister_driver(&twl_driver); ++} /* End twl_exit() */ ++ ++module_init(twl_init); ++module_exit(twl_exit); ++ +diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h +new file mode 100644 +index 0000000..e620505 +--- /dev/null ++++ b/drivers/scsi/3w-sas.h +@@ -0,0 +1,396 @@ ++/* ++ 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux. 
++ ++ Written By: Adam Radford <linuxraid@lsi.com> ++ ++ Copyright (C) 2009 LSI Corporation. ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of the GNU General Public License as published by ++ the Free Software Foundation; version 2 of the License. ++ ++ This program is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. ++ ++ NO WARRANTY ++ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR ++ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT ++ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, ++ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is ++ solely responsible for determining the appropriateness of using and ++ distributing the Program and assumes all risks associated with its ++ exercise of rights under this Agreement, including but not limited to ++ the risks and costs of program errors, damage to or loss of data, ++ programs or equipment, and unavailability or interruption of operations. ++ ++ DISCLAIMER OF LIABILITY ++ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY ++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ++ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR ++ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE ++ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED ++ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES ++ ++ You should have received a copy of the GNU General Public License ++ along with this program; if not, write to the Free Software ++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ ++ Bugs/Comments/Suggestions should be mailed to: ++ linuxraid@lsi.com ++ ++ For more information, goto: ++ http://www.lsi.com ++*/ ++ ++#ifndef _3W_SAS_H ++#define _3W_SAS_H ++ ++/* AEN severity table */ ++static char *twl_aen_severity_table[] = ++{ ++ "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0 ++}; ++ ++/* Liberator register offsets */ ++#define TWL_STATUS 0x0 /* Status */ ++#define TWL_HIBDB 0x20 /* Inbound doorbell */ ++#define TWL_HISTAT 0x30 /* Host interrupt status */ ++#define TWL_HIMASK 0x34 /* Host interrupt mask */ ++#define TWL_HOBDB 0x9C /* Outbound doorbell */ ++#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */ ++#define TWL_SCRPD3 0xBC /* Scratchpad */ ++#define TWL_HIBQPL 0xC0 /* Host inbound Q low */ ++#define TWL_HIBQPH 0xC4 /* Host inbound Q high */ ++#define TWL_HOBQPL 0xC8 /* Host outbound Q low */ ++#define TWL_HOBQPH 0xCC /* Host outbound Q high */ ++#define TWL_HISTATUS_VALID_INTERRUPT 0xC ++#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4 ++#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8 ++#define TWL_STATUS_OVERRUN_SUBMIT 0x2000 ++#define TWL_ISSUE_SOFT_RESET 0x100 ++#define TWL_CONTROLLER_READY 0x2000 ++#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000 ++#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000 ++#define TWL_PULL_MODE 0x1 ++ ++/* Command packet opcodes used by the driver */ ++#define TW_OP_INIT_CONNECTION 0x1 ++#define TW_OP_GET_PARAM 0x12 ++#define TW_OP_SET_PARAM 0x13 ++#define TW_OP_EXECUTE_SCSI 0x10 ++ ++/* Asynchronous Event Notification (AEN) codes used by the driver */ ++#define TW_AEN_QUEUE_EMPTY 0x0000 ++#define 
TW_AEN_SOFT_RESET 0x0001 ++#define TW_AEN_SYNC_TIME_WITH_HOST 0x031 ++#define TW_AEN_SEVERITY_ERROR 0x1 ++#define TW_AEN_SEVERITY_DEBUG 0x4 ++#define TW_AEN_NOT_RETRIEVED 0x1 ++ ++/* Command state defines */ ++#define TW_S_INITIAL 0x1 /* Initial state */ ++#define TW_S_STARTED 0x2 /* Id in use */ ++#define TW_S_POSTED 0x4 /* Posted to the controller */ ++#define TW_S_COMPLETED 0x8 /* Completed by isr */ ++#define TW_S_FINISHED 0x10 /* I/O completely done */ ++ ++/* Compatibility defines */ ++#define TW_9750_ARCH_ID 10 ++#define TW_CURRENT_DRIVER_SRL 40 ++#define TW_CURRENT_DRIVER_BUILD 0 ++#define TW_CURRENT_DRIVER_BRANCH 0 ++ ++/* Phase defines */ ++#define TW_PHASE_INITIAL 0 ++#define TW_PHASE_SGLIST 2 ++ ++/* Misc defines */ ++#define TW_SECTOR_SIZE 512 ++#define TW_MAX_UNITS 32 ++#define TW_INIT_MESSAGE_CREDITS 0x100 ++#define TW_INIT_COMMAND_PACKET_SIZE 0x3 ++#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6 ++#define TW_EXTENDED_INIT_CONNECT 0x2 ++#define TW_BASE_FW_SRL 24 ++#define TW_BASE_FW_BRANCH 0 ++#define TW_BASE_FW_BUILD 1 ++#define TW_Q_LENGTH 256 ++#define TW_Q_START 0 ++#define TW_MAX_SLOT 32 ++#define TW_MAX_RESET_TRIES 2 ++#define TW_MAX_CMDS_PER_LUN 254 ++#define TW_MAX_AEN_DRAIN 255 ++#define TW_IN_RESET 2 ++#define TW_USING_MSI 3 ++#define TW_IN_ATTENTION_LOOP 4 ++#define TW_MAX_SECTORS 256 ++#define TW_MAX_CDB_LEN 16 ++#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ ++#define TW_IOCTL_CHRDEV_FREE -1 ++#define TW_COMMAND_OFFSET 128 /* 128 bytes */ ++#define TW_VERSION_TABLE 0x0402 ++#define TW_TIMEKEEP_TABLE 0x040A ++#define TW_INFORMATION_TABLE 0x0403 ++#define TW_PARAM_FWVER 3 ++#define TW_PARAM_FWVER_LENGTH 16 ++#define TW_PARAM_BIOSVER 4 ++#define TW_PARAM_BIOSVER_LENGTH 16 ++#define TW_PARAM_MODEL 8 ++#define TW_PARAM_MODEL_LENGTH 16 ++#define TW_PARAM_PHY_SUMMARY_TABLE 1 ++#define TW_PARAM_PHYCOUNT 2 ++#define TW_PARAM_PHYCOUNT_LENGTH 1 ++#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools ++#define TW_ALLOCATION_LENGTH 128 ++#define TW_SENSE_DATA_LENGTH 18 ++#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a ++#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d ++#define TW_ERROR_UNIT_OFFLINE 0x128 ++#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3 ++#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4 ++#define TW_DRIVER 6 ++#ifndef PCI_DEVICE_ID_3WARE_9750 ++#define PCI_DEVICE_ID_3WARE_9750 0x1010 ++#endif ++ ++/* Bitmask macros to eliminate bitfields */ ++ ++/* opcode: 5, reserved: 3 */ ++#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f)) ++#define TW_OP_OUT(x) (x & 0x1f) ++ ++/* opcode: 5, sgloffset: 3 */ ++#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) ++#define TW_SGL_OUT(x) ((x >> 5) & 0x7) ++ ++/* severity: 3, reserved: 5 */ ++#define TW_SEV_OUT(x) (x & 0x7) ++ ++/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */ ++#define TW_RESID_OUT(x) ((x >> 16) & 0xffff) ++#define TW_NOTMFA_OUT(x) (x & 0x1) ++ ++/* request_id: 12, lun: 4 */ ++#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff)) ++#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf) ++ ++/* Register access macros */ ++#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS) ++#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL) ++#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH) ++#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB) ++#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC) ++#define TWL_HIMASK_REG_ADDR(x) 
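The "bitmask macros to eliminate bitfields" above trade C bitfields, whose layout is compiler-defined, for explicit shifts and masks, so the wire format is unambiguous. A quick round-trip check of the request-id/LUN packing (macro names shortened; REQ_OUT is an illustrative extractor, the header itself only defines TW_LUN_OUT for this field):

#include <stdio.h>

/* request_id: 12 bits, lun: 4 bits -- cf. TW_REQ_LUN_IN/TW_LUN_OUT */
#define REQ_LUN_IN(lun, id) ((((lun) << 12) & 0xf000) | ((id) & 0xfff))
#define LUN_OUT(x)          (((x) >> 12) & 0xf)
#define REQ_OUT(x)          ((x) & 0xfff)	/* illustrative extractor */

int main(void)
{
	unsigned short packed = REQ_LUN_IN(5, 300);

	printf("packed=0x%04x lun=%u id=%u\n",
	       packed, LUN_OUT(packed), REQ_OUT(packed));	/* 0x512c 5 300 */
	return 0;
}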
((unsigned char __iomem *)x->base_addr + TWL_HIMASK) ++#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT) ++#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH) ++#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL) ++#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB) ++#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3) ++#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev))) ++#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev))) ++#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev))) ++#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev))) ++ ++/* Macros */ ++#define TW_PRINTK(h,a,b,c) { \ ++if (h) \ ++printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ ++else \ ++printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ ++} ++#define TW_MAX_LUNS 16 ++#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4) ++#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92) ++#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94) ++#define TW_PADDING_LENGTH_LIBERATOR 136 ++#define TW_PADDING_LENGTH_LIBERATOR_OLD 132 ++#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x)) ++ ++#pragma pack(1) ++ ++/* SGL entry */ ++typedef struct TAG_TW_SG_Entry_ISO { ++ dma_addr_t address; ++ dma_addr_t length; ++} TW_SG_Entry_ISO; ++ ++/* Old Command Packet with ISO SGL */ ++typedef struct TW_Command { ++ unsigned char opcode__sgloffset; ++ unsigned char size; ++ unsigned char request_id; ++ unsigned char unit__hostid; ++ /* Second DWORD */ ++ unsigned char status; ++ unsigned char flags; ++ union { ++ unsigned short block_count; ++ unsigned short parameter_count; ++ } byte6_offset; ++ union { ++ struct { ++ u32 lba; ++ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD]; ++ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD]; ++ } io; ++ struct { ++ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD]; ++ u32 padding; ++ unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD]; ++ } param; ++ } byte8_offset; ++} TW_Command; ++ ++/* New Command Packet with ISO SGL */ ++typedef struct TAG_TW_Command_Apache { ++ unsigned char opcode__reserved; ++ unsigned char unit; ++ unsigned short request_id__lunl; ++ unsigned char status; ++ unsigned char sgl_offset; ++ unsigned short sgl_entries__lunh; ++ unsigned char cdb[16]; ++ TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH]; ++ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR]; ++} TW_Command_Apache; ++ ++/* New command packet header */ ++typedef struct TAG_TW_Command_Apache_Header { ++ unsigned char sense_data[TW_SENSE_DATA_LENGTH]; ++ struct { ++ char reserved[4]; ++ unsigned short error; ++ unsigned char padding; ++ unsigned char severity__reserved; ++ } status_block; ++ unsigned char err_specific_desc[98]; ++ struct { ++ unsigned char size_header; ++ unsigned short request_id; ++ unsigned char size_sense; ++ } header_desc; ++} TW_Command_Apache_Header; ++ ++/* This struct is a union of the 2 command packets */ ++typedef struct TAG_TW_Command_Full { ++ TW_Command_Apache_Header header; ++ union { ++ TW_Command oldcommand; ++ TW_Command_Apache newcommand; ++ } command; ++} TW_Command_Full; ++ ++/* Initconnection structure */ ++typedef struct TAG_TW_Initconnect { ++ unsigned char 
opcode__reserved; ++ unsigned char size; ++ unsigned char request_id; ++ unsigned char res2; ++ unsigned char status; ++ unsigned char flags; ++ unsigned short message_credits; ++ u32 features; ++ unsigned short fw_srl; ++ unsigned short fw_arch_id; ++ unsigned short fw_branch; ++ unsigned short fw_build; ++ u32 result; ++} TW_Initconnect; ++ ++/* Event info structure */ ++typedef struct TAG_TW_Event ++{ ++ unsigned int sequence_id; ++ unsigned int time_stamp_sec; ++ unsigned short aen_code; ++ unsigned char severity; ++ unsigned char retrieved; ++ unsigned char repeat_count; ++ unsigned char parameter_len; ++ unsigned char parameter_data[98]; ++} TW_Event; ++ ++typedef struct TAG_TW_Ioctl_Driver_Command { ++ unsigned int control_code; ++ unsigned int status; ++ unsigned int unique_id; ++ unsigned int sequence_id; ++ unsigned int os_specific; ++ unsigned int buffer_length; ++} TW_Ioctl_Driver_Command; ++ ++typedef struct TAG_TW_Ioctl_Apache { ++ TW_Ioctl_Driver_Command driver_command; ++ char padding[488]; ++ TW_Command_Full firmware_command; ++ char data_buffer[1]; ++} TW_Ioctl_Buf_Apache; ++ ++/* GetParam descriptor */ ++typedef struct { ++ unsigned short table_id; ++ unsigned short parameter_id; ++ unsigned short parameter_size_bytes; ++ unsigned short actual_parameter_size_bytes; ++ unsigned char data[1]; ++} TW_Param_Apache; ++ ++/* Compatibility information structure */ ++typedef struct TAG_TW_Compatibility_Info ++{ ++ char driver_version[32]; ++ unsigned short working_srl; ++ unsigned short working_branch; ++ unsigned short working_build; ++ unsigned short driver_srl_high; ++ unsigned short driver_branch_high; ++ unsigned short driver_build_high; ++ unsigned short driver_srl_low; ++ unsigned short driver_branch_low; ++ unsigned short driver_build_low; ++ unsigned short fw_on_ctlr_srl; ++ unsigned short fw_on_ctlr_branch; ++ unsigned short fw_on_ctlr_build; ++} TW_Compatibility_Info; ++ ++#pragma pack() ++ ++typedef struct TAG_TW_Device_Extension { ++ void __iomem *base_addr; ++ unsigned long *generic_buffer_virt[TW_Q_LENGTH]; ++ dma_addr_t generic_buffer_phys[TW_Q_LENGTH]; ++ TW_Command_Full *command_packet_virt[TW_Q_LENGTH]; ++ dma_addr_t command_packet_phys[TW_Q_LENGTH]; ++ TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH]; ++ dma_addr_t sense_buffer_phys[TW_Q_LENGTH]; ++ struct pci_dev *tw_pci_dev; ++ struct scsi_cmnd *srb[TW_Q_LENGTH]; ++ unsigned char free_queue[TW_Q_LENGTH]; ++ unsigned char free_head; ++ unsigned char free_tail; ++ int state[TW_Q_LENGTH]; ++ unsigned int posted_request_count; ++ unsigned int max_posted_request_count; ++ unsigned int max_sgl_entries; ++ unsigned int sgl_entries; ++ unsigned int num_resets; ++ unsigned int sector_count; ++ unsigned int max_sector_count; ++ unsigned int aen_count; ++ struct Scsi_Host *host; ++ long flags; ++ TW_Event *event_queue[TW_Q_LENGTH]; ++ unsigned char error_index; ++ unsigned int error_sequence_id; ++ int chrdev_request_id; ++ wait_queue_head_t ioctl_wqueue; ++ struct mutex ioctl_lock; ++ TW_Compatibility_Info tw_compat_info; ++ char online; ++} TW_Device_Extension; ++ ++#endif /* _3W_SAS_H */ ++ +diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c +index 1ddcf40..a85f062 100644 +--- a/drivers/scsi/BusLogic.c ++++ b/drivers/scsi/BusLogic.c +@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda + static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter + *PrototypeHostAdapter) + { ++ pax_track_stack(); ++ + /* + If a PCI BIOS is 
present, interrogate it for MultiMaster and FlashPoint + Host Adapters; otherwise, default to the standard ISA MultiMaster probe. +diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig +index e11cca4..4295679 100644 +--- a/drivers/scsi/Kconfig ++++ b/drivers/scsi/Kconfig +@@ -399,6 +399,17 @@ config SCSI_3W_9XXX + Please read the comments at the top of + <file:drivers/scsi/3w-9xxx.c>. + ++config SCSI_3W_SAS ++ tristate "3ware 97xx SAS/SATA-RAID support" ++ depends on PCI && SCSI ++ help ++ This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards. ++ ++ <http://www.lsi.com> ++ ++ Please read the comments at the top of ++ <file:drivers/scsi/3w-sas.c>. ++ + config SCSI_7000FASST + tristate "7000FASST SCSI support" + depends on ISA && SCSI && ISA_DMA_API +@@ -621,6 +632,14 @@ config SCSI_FLASHPOINT + substantial, so users of MultiMaster Host Adapters may not + wish to include it. + ++config VMWARE_PVSCSI ++ tristate "VMware PVSCSI driver support" ++ depends on PCI && SCSI && X86 ++ help ++ This driver supports VMware's para-virtualized SCSI HBA. ++ To compile this driver as a module, choose M here: the ++ module will be called vmw_pvscsi. ++ + config LIBFC + tristate "LibFC module" + select SCSI_FC_ATTRS +diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile +index 3ad61db..c938975 100644 +--- a/drivers/scsi/Makefile ++++ b/drivers/scsi/Makefile +@@ -113,6 +113,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o + obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o + obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o + obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o ++obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o + obj-$(CONFIG_SCSI_PPA) += ppa.o + obj-$(CONFIG_SCSI_IMM) += imm.o + obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o +@@ -133,6 +134,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ + obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ + obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ + obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o ++obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o + + obj-$(CONFIG_ARM) += arm/ + +diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h +index cdbdec9..b7d560b 100644 +--- a/drivers/scsi/aacraid/aacraid.h ++++ b/drivers/scsi/aacraid/aacraid.h +@@ -471,7 +471,7 @@ struct adapter_ops + int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd); + /* Administrative operations */ + int (*adapter_comm)(struct aac_dev * dev, int comm); +-}; ++} __no_const; + + /* + * Define which interrupt handler needs to be installed +diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c +index a5b8e7b..a6a0e43 100644 +--- a/drivers/scsi/aacraid/commctrl.c ++++ b/drivers/scsi/aacraid/commctrl.c +@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) + u32 actual_fibsize64, actual_fibsize = 0; + int i; + ++ pax_track_stack(); + + if (dev->in_reset) { + dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c +index 9b97c3e..f099725 100644 +--- a/drivers/scsi/aacraid/linit.c ++++ b/drivers/scsi/aacraid/linit.c +@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = { + #elif defined(__devinitconst) + static const struct pci_device_id aac_pci_tbl[] __devinitconst = { + #else +-static const struct pci_device_id aac_pci_tbl[] __devinitdata = { ++static const struct pci_device_id aac_pci_tbl[] __devinitconst = { + #endif + { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */ + { 0x1028, 0x0002, 0x1028, 
0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */ +diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c +index 996f722..9127845 100644 +--- a/drivers/scsi/aic94xx/aic94xx_init.c ++++ b/drivers/scsi/aic94xx/aic94xx_init.c +@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev, + flash_error_table[i].reason); + } + +-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO, ++static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR, + asd_show_update_bios, asd_store_update_bios); + + static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) +@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = { + .lldd_control_phy = asd_control_phy, + }; + +-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = { ++static const struct pci_device_id aic94xx_pci_table[] __devinitconst = { + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1}, +diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h +index 58efd4b..cb48dc7 100644 +--- a/drivers/scsi/bfa/bfa_ioc.h ++++ b/drivers/scsi/bfa/bfa_ioc.h +@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s { + bfa_ioc_disable_cbfn_t disable_cbfn; + bfa_ioc_hbfail_cbfn_t hbfail_cbfn; + bfa_ioc_reset_cbfn_t reset_cbfn; +-}; ++} __no_const; + + /** + * Heartbeat failure notification queue element. +diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h +index 7ad177e..5503586 100644 +--- a/drivers/scsi/bfa/bfa_iocfc.h ++++ b/drivers/scsi/bfa/bfa_iocfc.h +@@ -61,7 +61,7 @@ struct bfa_hwif_s { + void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix); + void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap, + u32 *nvecs, u32 *maxvec); +-}; ++} __no_const; + typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); + + struct bfa_iocfc_s { +diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c +index 4967643..cbec06b 100644 +--- a/drivers/scsi/dpt_i2o.c ++++ b/drivers/scsi/dpt_i2o.c +@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) + dma_addr_t addr; + ulong flags = 0; + ++ pax_track_stack(); ++ + memset(&msg, 0, MAX_MESSAGE_SIZE*4); + // get user msg size in u32s + if(get_user(size, &user_msg[0])){ +@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d + s32 rcode; + dma_addr_t addr; + ++ pax_track_stack(); ++ + memset(msg, 0 , sizeof(msg)); + len = scsi_bufflen(cmd); + direction = 0x00000000; +diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c +index c7076ce..e20c67c 100644 +--- a/drivers/scsi/eata.c ++++ b/drivers/scsi/eata.c +@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j, + struct hostdata *ha; + char name[16]; + ++ pax_track_stack(); ++ + sprintf(name, "%s%d", driver_name, j); + + if (!request_region(port_base, REGION_SIZE, driver_name)) { +diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c +index 11ae5c9..891daec 100644 +--- a/drivers/scsi/fcoe/libfcoe.c ++++ b/drivers/scsi/fcoe/libfcoe.c +@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) + size_t rlen; + size_t dlen; + ++ pax_track_stack(); ++ + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + if (sub != FIP_SC_REQ && sub != FIP_SC_REP) +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c +index 71c7bbe..e93088a 100644 +--- 
a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, + /* Start local port initiatialization */ + + lp->link_up = 0; +- lp->tt = fnic_transport_template; ++ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template)); + + lp->max_retry_count = fnic->config.flogi_retries; + lp->max_rport_retry_count = fnic->config.plogi_retries; +diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c +index bb96d74..9ec3ce4 100644 +--- a/drivers/scsi/gdth.c ++++ b/drivers/scsi/gdth.c +@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg) + ulong flags; + gdth_ha_str *ha; + ++ pax_track_stack(); ++ + if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv))) + return -EFAULT; + ha = gdth_find_ha(ldrv.ionode); +@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd) + gdth_ha_str *ha; + int rval; + ++ pax_track_stack(); ++ + if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) || + res.number >= MAX_HDRIVES) + return -EFAULT; +@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd) + gdth_ha_str *ha; + int rval; + ++ pax_track_stack(); ++ + if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general))) + return -EFAULT; + ha = gdth_find_ha(gen.ionode); +@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha) + int i; + gdth_cmd_str gdtcmd; + char cmnd[MAX_COMMAND_SIZE]; ++ ++ pax_track_stack(); ++ + memset(cmnd, 0xff, MAX_COMMAND_SIZE); + + TRACE2(("gdth_flush() hanum %d\n", ha->hanum)); +diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c +index 1258da3..20d8ae6 100644 +--- a/drivers/scsi/gdth_proc.c ++++ b/drivers/scsi/gdth_proc.c +@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, + ulong64 paddr; + + char cmnd[MAX_COMMAND_SIZE]; ++ ++ pax_track_stack(); ++ + memset(cmnd, 0xff, 12); + memset(&gdtcmd, 0, sizeof(gdth_cmd_str)); + +@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, + gdth_hget_str *phg; + char cmnd[MAX_COMMAND_SIZE]; + ++ pax_track_stack(); ++ + gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL); + estr = kmalloc(sizeof(*estr), GFP_KERNEL); + if (!gdtcmd || !estr) +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c +index d03a926..f324286 100644 +--- a/drivers/scsi/hosts.c ++++ b/drivers/scsi/hosts.c +@@ -40,7 +40,7 @@ + #include "scsi_logging.h" + + +-static atomic_t scsi_host_next_hn; /* host_no for next new host */ ++static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */ + + + static void scsi_host_cls_release(struct device *dev) +@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) + * subtract one because we increment first then return, but we need to + * know what the next host number was before increment + */ +- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; ++ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1; + shost->dma_channel = 0xff; + + /* These three are default values which can be overridden */ +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c +index a601159..55e19d2 100644 +--- a/drivers/scsi/ipr.c ++++ b/drivers/scsi/ipr.c +@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) + return true; + } + +-static struct ata_port_operations ipr_sata_ops = { ++static const struct ata_port_operations ipr_sata_ops = { + .phy_reset = ipr_ata_phy_reset, + .hardreset = ipr_sata_reset, + .post_internal_cmd = 
ipr_ata_post_internal, +diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h +index 4e49fbc..97907ff 100644 +--- a/drivers/scsi/ips.h ++++ b/drivers/scsi/ips.h +@@ -1027,7 +1027,7 @@ typedef struct { + int (*intr)(struct ips_ha *); + void (*enableint)(struct ips_ha *); + uint32_t (*statupd)(struct ips_ha *); +-} ips_hw_func_t; ++} __no_const ips_hw_func_t; + + typedef struct ips_ha { + uint8_t ha_id[IPS_MAX_CHANNELS+1]; +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index c1c1574..a9c9348 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -86,12 +86,12 @@ struct fc_exch_mgr { + * all together if not used XXX + */ + struct { +- atomic_t no_free_exch; +- atomic_t no_free_exch_xid; +- atomic_t xid_not_found; +- atomic_t xid_busy; +- atomic_t seq_not_found; +- atomic_t non_bls_resp; ++ atomic_unchecked_t no_free_exch; ++ atomic_unchecked_t no_free_exch_xid; ++ atomic_unchecked_t xid_not_found; ++ atomic_unchecked_t xid_busy; ++ atomic_unchecked_t seq_not_found; ++ atomic_unchecked_t non_bls_resp; + } stats; + }; + #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) +@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, + /* allocate memory for exchange */ + ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); + if (!ep) { +- atomic_inc(&mp->stats.no_free_exch); ++ atomic_inc_unchecked(&mp->stats.no_free_exch); + goto out; + } + memset(ep, 0, sizeof(*ep)); +@@ -557,7 +557,7 @@ out: + return ep; + err: + spin_unlock_bh(&pool->lock); +- atomic_inc(&mp->stats.no_free_exch_xid); ++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid); + mempool_free(ep, mp->ep_pool); + return NULL; + } +@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + xid = ntohs(fh->fh_ox_id); /* we originated exch */ + ep = fc_exch_find(mp, xid); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_OX_ID; + goto out; + } +@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + ep = fc_exch_find(mp, xid); + if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { + if (ep) { +- atomic_inc(&mp->stats.xid_busy); ++ atomic_inc_unchecked(&mp->stats.xid_busy); + reject = FC_RJT_RX_ID; + goto rel; + } +@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + } + xid = ep->xid; /* get our XID */ + } else if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_RX_ID; /* XID not found */ + goto out; + } +@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + } else { + sp = &ep->seq; + if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */ + goto rel; + } +@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + + ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto out; + } + if (ep->esb_stat & ESB_ST_COMPLETE) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto out; + } + if (ep->rxid == FC_XID_UNKNOWN) + ep->rxid = ntohs(fh->fh_rx_id); + if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { +- atomic_inc(&mp->stats.xid_not_found); 
++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + if (ep->did != ntoh24(fh->fh_s_id) && + ep->did != FC_FID_FLOGI) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + sof = fr_sof(fp); +@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + } else { + sp = &ep->seq; + if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + goto rel; + } + } +@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ + + if (!sp) +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + else +- atomic_inc(&mp->stats.non_bls_resp); ++ atomic_inc_unchecked(&mp->stats.non_bls_resp); + + fc_frame_free(fp); + } +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c +index 0ee989f..a582241 100644 +--- a/drivers/scsi/libsas/sas_ata.c ++++ b/drivers/scsi/libsas/sas_ata.c +@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in, + } + } + +-static struct ata_port_operations sas_sata_ops = { ++static const struct ata_port_operations sas_sata_ops = { + .phy_reset = sas_ata_phy_reset, + .post_internal_cmd = sas_ata_post_internal, + .qc_defer = ata_std_qc_defer, +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h +index aa10f79..5cc79e4 100644 +--- a/drivers/scsi/lpfc/lpfc.h ++++ b/drivers/scsi/lpfc/lpfc.h +@@ -400,7 +400,7 @@ struct lpfc_vport { + struct dentry *debug_nodelist; + struct dentry *vport_debugfs_root; + struct lpfc_debugfs_trc *disc_trc; +- atomic_t disc_trc_cnt; ++ atomic_unchecked_t disc_trc_cnt; + #endif + uint8_t stat_data_enabled; + uint8_t stat_data_blocked; +@@ -725,8 +725,8 @@ struct lpfc_hba { + struct timer_list fabric_block_timer; + unsigned long bit_flags; + #define FABRIC_COMANDS_BLOCKED 0 +- atomic_t num_rsrc_err; +- atomic_t num_cmd_success; ++ atomic_unchecked_t num_rsrc_err; ++ atomic_unchecked_t num_cmd_success; + unsigned long last_rsrc_error_time; + unsigned long last_ramp_down_time; + unsigned long last_ramp_up_time; +@@ -740,7 +740,7 @@ struct lpfc_hba { + struct dentry *debug_dumpDif; /* BlockGuard BPL*/ + struct dentry *debug_slow_ring_trc; + struct lpfc_debugfs_trc *slow_ring_trc; +- atomic_t slow_ring_trc_cnt; ++ atomic_unchecked_t slow_ring_trc_cnt; + #endif + + /* Used for deferred freeing of ELS data buffers */ +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c +index 8d0f0de..7c77a62 100644 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c +@@ -124,7 +124,7 @@ struct lpfc_debug { + int len; + }; + +-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); ++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); + static unsigned long lpfc_debugfs_start_time = 0L; + + /** +@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) + lpfc_debugfs_enable = 0; + + len = 0; +- index = (atomic_read(&vport->disc_trc_cnt) + 1) & ++ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) & + (lpfc_debugfs_max_disc_trc - 1); + for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { + dtp = vport->disc_trc + i; +@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) + lpfc_debugfs_enable = 0; + + len = 0; +- index = 
(atomic_read(&phba->slow_ring_trc_cnt) + 1) & ++ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) & + (lpfc_debugfs_max_slow_ring_trc - 1); + for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) { + dtp = phba->slow_ring_trc + i; +@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) + uint32_t *ptr; + char buffer[1024]; + ++ pax_track_stack(); ++ + off = 0; + spin_lock_irq(&phba->hbalock); + +@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, + !vport || !vport->disc_trc) + return; + +- index = atomic_inc_return(&vport->disc_trc_cnt) & ++ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) & + (lpfc_debugfs_max_disc_trc - 1); + dtp = vport->disc_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; + #endif + return; +@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, + !phba || !phba->slow_ring_trc) + return; + +- index = atomic_inc_return(&phba->slow_ring_trc_cnt) & ++ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) & + (lpfc_debugfs_max_slow_ring_trc - 1); + dtp = phba->slow_ring_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; + #endif + return; +@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) + "slow_ring buffer\n"); + goto debug_failed; + } +- atomic_set(&phba->slow_ring_trc_cnt, 0); ++ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0); + memset(phba->slow_ring_trc, 0, + (sizeof(struct lpfc_debugfs_trc) * + lpfc_debugfs_max_slow_ring_trc)); +@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) + "buffer\n"); + goto debug_failed; + } +- atomic_set(&vport->disc_trc_cnt, 0); ++ atomic_set_unchecked(&vport->disc_trc_cnt, 0); + + snprintf(name, sizeof(name), "discovery_trace"); + vport->debug_disc_trc = +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 549bc7d..8189dbb 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -8021,8 +8021,10 @@ lpfc_init(void) + printk(LPFC_COPYRIGHT "\n"); + + if (lpfc_enable_npiv) { +- lpfc_transport_functions.vport_create = lpfc_vport_create; +- lpfc_transport_functions.vport_delete = lpfc_vport_delete; ++ pax_open_kernel(); ++ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create; ++ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete; ++ pax_close_kernel(); + } + lpfc_transport_template = + fc_attach_transport(&lpfc_transport_functions); +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index c88f59f..ff2a42f 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) + uint32_t evt_posted; + + spin_lock_irqsave(&phba->hbalock, flags); +- atomic_inc(&phba->num_rsrc_err); ++ atomic_inc_unchecked(&phba->num_rsrc_err); + phba->last_rsrc_error_time = jiffies; + + if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { +@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, + unsigned long flags; + struct lpfc_hba *phba = 
vport->phba; + uint32_t evt_posted; +- atomic_inc(&phba->num_cmd_success); ++ atomic_inc_unchecked(&phba->num_cmd_success); + + if (vport->cfg_lun_queue_depth <= queue_depth) + return; +@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) + int i; + struct lpfc_rport_data *rdata; + +- num_rsrc_err = atomic_read(&phba->num_rsrc_err); +- num_cmd_success = atomic_read(&phba->num_cmd_success); ++ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err); ++ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success); + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) +@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) + } + } + lpfc_destroy_vport_work_array(phba, vports); +- atomic_set(&phba->num_rsrc_err, 0); +- atomic_set(&phba->num_cmd_success, 0); ++ atomic_set_unchecked(&phba->num_rsrc_err, 0); ++ atomic_set_unchecked(&phba->num_cmd_success, 0); + } + + /** +@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) + } + } + lpfc_destroy_vport_work_array(phba, vports); +- atomic_set(&phba->num_rsrc_err, 0); +- atomic_set(&phba->num_cmd_success, 0); ++ atomic_set_unchecked(&phba->num_rsrc_err, 0); ++ atomic_set_unchecked(&phba->num_cmd_success, 0); + } + + /** +diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c +index 234f0b7..3020aea 100644 +--- a/drivers/scsi/megaraid/megaraid_mbox.c ++++ b/drivers/scsi/megaraid/megaraid_mbox.c +@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter) + int rval; + int i; + ++ pax_track_stack(); ++ + // Allocate memory for the base list of scb for management module. + adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL); + +diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c +index 7a117c1..ee01e9e 100644 +--- a/drivers/scsi/osd/osd_initiator.c ++++ b/drivers/scsi/osd/osd_initiator.c +@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) + int nelem = ARRAY_SIZE(get_attrs), a = 0; + int ret; + ++ pax_track_stack(); ++ + or = osd_start_request(od, GFP_KERNEL); + if (!or) + return -ENOMEM; +diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c +index 9ab8c86..9425ad3 100644 +--- a/drivers/scsi/pmcraid.c ++++ b/drivers/scsi/pmcraid.c +@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) + res->scsi_dev = scsi_dev; + scsi_dev->hostdata = res; + res->change_detected = 0; +- atomic_set(&res->read_failures, 0); +- atomic_set(&res->write_failures, 0); ++ atomic_set_unchecked(&res->read_failures, 0); ++ atomic_set_unchecked(&res->write_failures, 0); + rc = 0; + } + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); +@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) + + /* If this was a SCSI read/write command keep count of errors */ + if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) +- atomic_inc(&res->read_failures); ++ atomic_inc_unchecked(&res->read_failures); + else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) +- atomic_inc(&res->write_failures); ++ atomic_inc_unchecked(&res->write_failures); + + if (!RES_IS_GSCSI(res->cfg_entry) && + masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { +@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp) + + pinstance = container_of(workp, struct pmcraid_instance, worker_q); + /* add resources only after host is added into system */ +- if (!atomic_read(&pinstance->expose_resources)) ++ if 
(!atomic_read_unchecked(&pinstance->expose_resources)) + return; + + spin_lock_irqsave(&pinstance->resource_lock, lock_flags); +@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance( + init_waitqueue_head(&pinstance->reset_wait_q); + + atomic_set(&pinstance->outstanding_cmds, 0); +- atomic_set(&pinstance->expose_resources, 0); ++ atomic_set_unchecked(&pinstance->expose_resources, 0); + + INIT_LIST_HEAD(&pinstance->free_res_q); + INIT_LIST_HEAD(&pinstance->used_res_q); +@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe( + /* Schedule worker thread to handle CCN and take care of adding and + * removing devices to OS + */ +- atomic_set(&pinstance->expose_resources, 1); ++ atomic_set_unchecked(&pinstance->expose_resources, 1); + schedule_work(&pinstance->worker_q); + return rc; + +diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h +index 3441b3f..6cbe8f7 100644 +--- a/drivers/scsi/pmcraid.h ++++ b/drivers/scsi/pmcraid.h +@@ -690,7 +690,7 @@ struct pmcraid_instance { + atomic_t outstanding_cmds; + + /* should add/delete resources to mid-layer now ?*/ +- atomic_t expose_resources; ++ atomic_unchecked_t expose_resources; + + /* Tasklet to handle deferred processing */ + struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS]; +@@ -727,8 +727,8 @@ struct pmcraid_resource_entry { + struct list_head queue; /* link to "to be exposed" resources */ + struct pmcraid_config_table_entry cfg_entry; + struct scsi_device *scsi_dev; /* Link scsi_device structure */ +- atomic_t read_failures; /* count of failed READ commands */ +- atomic_t write_failures; /* count of failed WRITE commands */ ++ atomic_unchecked_t read_failures; /* count of failed READ commands */ ++ atomic_unchecked_t write_failures; /* count of failed WRITE commands */ + + /* To indicate add/delete/modify during CCN */ + u8 change_detected; +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h +index 2150618..7034215 100644 +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -2089,7 +2089,7 @@ struct isp_operations { + + int (*get_flash_version) (struct scsi_qla_host *, void *); + int (*start_scsi) (srb_t *); +-}; ++} __no_const; + + /* MSI-X Support *************************************************************/ + +diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h +index 81b5f29..2ae1fad 100644 +--- a/drivers/scsi/qla4xxx/ql4_def.h ++++ b/drivers/scsi/qla4xxx/ql4_def.h +@@ -240,7 +240,7 @@ struct ddb_entry { + atomic_t retry_relogin_timer; /* Min Time between relogins + * (4000 only) */ + atomic_t relogin_timer; /* Max Time to wait for relogin to complete */ +- atomic_t relogin_retry_count; /* Num of times relogin has been ++ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been + * retried */ + + uint16_t port; +diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c +index af8c323..515dd51 100644 +--- a/drivers/scsi/qla4xxx/ql4_init.c ++++ b/drivers/scsi/qla4xxx/ql4_init.c +@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, + atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count); + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); + atomic_set(&ddb_entry->relogin_timer, 0); +- atomic_set(&ddb_entry->relogin_retry_count, 0); ++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); + atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); + list_add_tail(&ddb_entry->list, &ha->ddb_list); + ha->fw_ddb_index_map[fw_ddb_index] = 
ddb_entry; +@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, + atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); + atomic_set(&ddb_entry->port_down_timer, + ha->port_down_retry_count); +- atomic_set(&ddb_entry->relogin_retry_count, 0); ++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); + atomic_set(&ddb_entry->relogin_timer, 0); + clear_bit(DF_RELOGIN, &ddb_entry->flags); + clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 83c8b5e..a82b348 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) + ddb_entry->fw_ddb_device_state == + DDB_DS_SESSION_FAILED) { + /* Reset retry relogin timer */ +- atomic_inc(&ddb_entry->relogin_retry_count); ++ atomic_inc_unchecked(&ddb_entry->relogin_retry_count); + DEBUG2(printk("scsi%ld: index[%d] relogin" + " timed out-retrying" + " relogin (%d)\n", + ha->host_no, + ddb_entry->fw_ddb_index, +- atomic_read(&ddb_entry-> ++ atomic_read_unchecked(&ddb_entry-> + relogin_retry_count)) + ); + start_dpc++; +diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c +index dd098ca..686ce01 100644 +--- a/drivers/scsi/scsi.c ++++ b/drivers/scsi/scsi.c +@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) + unsigned long timeout; + int rtn = 0; + +- atomic_inc(&cmd->device->iorequest_cnt); ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt); + + /* check if the device is still usable */ + if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c +index bc3e363..e1a8e50 100644 +--- a/drivers/scsi/scsi_debug.c ++++ b/drivers/scsi/scsi_debug.c +@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, + unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; + unsigned char *cmd = (unsigned char *)scp->cmnd; + ++ pax_track_stack(); ++ + if ((errsts = check_readiness(scp, 1, devip))) + return errsts; + memset(arr, 0, sizeof(arr)); +@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp, + unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; + unsigned char *cmd = (unsigned char *)scp->cmnd; + ++ pax_track_stack(); ++ + if ((errsts = check_readiness(scp, 1, devip))) + return errsts; + memset(arr, 0, sizeof(arr)); +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 8df12522..c4c1472 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) + shost = sdev->host; + scsi_init_cmd_errh(cmd); + cmd->result = DID_NO_CONNECT << 16; +- atomic_inc(&cmd->device->iorequest_cnt); ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt); + + /* + * SCSI request completion path will do scsi_device_unbusy(), +@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq) + */ + cmd->serial_number = 0; + +- atomic_inc(&cmd->device->iodone_cnt); ++ atomic_inc_unchecked(&cmd->device->iodone_cnt); + if (cmd->result) +- atomic_inc(&cmd->device->ioerr_cnt); ++ atomic_inc_unchecked(&cmd->device->ioerr_cnt); + + disposition = scsi_decide_disposition(cmd); + if (disposition != SUCCESS && +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 91a93e0..eae0fe3 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \ + char *buf) \ + { \ 
+ struct scsi_device *sdev = to_scsi_device(dev); \ +- unsigned long long count = atomic_read(&sdev->field); \ ++ unsigned long long count = atomic_read_unchecked(&sdev->field); \ + return snprintf(buf, 20, "0x%llx\n", count); \ + } \ + static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) +diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c +index 1030327..f91fd30 100644 +--- a/drivers/scsi/scsi_tgt_lib.c ++++ b/drivers/scsi/scsi_tgt_lib.c +@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, + int err; + + dprintk("%lx %u\n", uaddr, len); +- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL); ++ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL); + if (err) { + /* + * TODO: need to fixup sg_tablesize, max_segment_size, +diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c +index db02e31..1b42ea9 100644 +--- a/drivers/scsi/scsi_transport_fc.c ++++ b/drivers/scsi/scsi_transport_fc.c +@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo, + * Netlink Infrastructure + */ + +-static atomic_t fc_event_seq; ++static atomic_unchecked_t fc_event_seq; + + /** + * fc_get_event_number - Obtain the next sequential FC event number +@@ -493,7 +493,7 @@ static atomic_t fc_event_seq; + u32 + fc_get_event_number(void) + { +- return atomic_add_return(1, &fc_event_seq); ++ return atomic_add_return_unchecked(1, &fc_event_seq); + } + EXPORT_SYMBOL(fc_get_event_number); + +@@ -641,7 +641,7 @@ static __init int fc_transport_init(void) + { + int error; + +- atomic_set(&fc_event_seq, 0); ++ atomic_set_unchecked(&fc_event_seq, 0); + + error = transport_class_register(&fc_host_class); + if (error) +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index de2f8c4..63c5278 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -81,7 +81,7 @@ struct iscsi_internal { + struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1]; + }; + +-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ ++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */ + static struct workqueue_struct *iscsi_eh_timer_workq; + + /* +@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) + int err; + + ihost = shost->shost_data; +- session->sid = atomic_add_return(1, &iscsi_session_nr); ++ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr); + + if (id == ISCSI_MAX_TARGET) { + for (id = 0; id < ISCSI_MAX_TARGET; id++) { +@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void) + printk(KERN_INFO "Loading iSCSI transport class v%s.\n", + ISCSI_TRANSPORT_VERSION); + +- atomic_set(&iscsi_session_nr, 0); ++ atomic_set_unchecked(&iscsi_session_nr, 0); + + err = class_register(&iscsi_transport_class); + if (err) +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c +index 21a045e..ec89e03 100644 +--- a/drivers/scsi/scsi_transport_srp.c ++++ b/drivers/scsi/scsi_transport_srp.c +@@ -33,7 +33,7 @@ + #include "scsi_transport_srp_internal.h" + + struct srp_host_attrs { +- atomic_t next_port_id; ++ atomic_unchecked_t next_port_id; + }; + #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) + +@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev, + struct Scsi_Host *shost = dev_to_shost(dev); + struct srp_host_attrs 
*srp_host = to_srp_host_attrs(shost); + +- atomic_set(&srp_host->next_port_id, 0); ++ atomic_set_unchecked(&srp_host->next_port_id, 0); + return 0; + } + +@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, + memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); + rport->roles = ids->roles; + +- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); ++ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id); + dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); + + transport_setup_device(&rport->dev); +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 040f751..98a5ed2 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp, + sdp->disk->disk_name, + MKDEV(SCSI_GENERIC_MAJOR, sdp->index), + NULL, +- (char *)arg); ++ (char __user *)arg); + case BLKTRACESTART: + return blk_trace_startstop(sdp->device->request_queue, 1); + case BLKTRACESTOP: +@@ -2292,7 +2292,7 @@ struct sg_proc_leaf { + const struct file_operations * fops; + }; + +-static struct sg_proc_leaf sg_proc_leaf_arr[] = { ++static const struct sg_proc_leaf sg_proc_leaf_arr[] = { + {"allow_dio", &adio_fops}, + {"debug", &debug_fops}, + {"def_reserved_size", &dressz_fops}, +@@ -2307,7 +2307,7 @@ sg_proc_init(void) + { + int k, mask; + int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); +- struct sg_proc_leaf * leaf; ++ const struct sg_proc_leaf * leaf; + + sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); + if (!sg_proc_sgp) +diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c +index c19ca5e..3eb5959 100644 +--- a/drivers/scsi/sym53c8xx_2/sym_glue.c ++++ b/drivers/scsi/sym53c8xx_2/sym_glue.c +@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev, + int do_iounmap = 0; + int do_disable_device = 1; + ++ pax_track_stack(); ++ + memset(&sym_dev, 0, sizeof(sym_dev)); + memset(&nvram, 0, sizeof(nvram)); + sym_dev.pdev = pdev; +diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c +new file mode 100644 +index 0000000..eabb432 +--- /dev/null ++++ b/drivers/scsi/vmw_pvscsi.c +@@ -0,0 +1,1401 @@ ++/* ++ * Linux driver for VMware's para-virtualized SCSI HBA. ++ * ++ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; version 2 of the License and no later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or ++ * NON INFRINGEMENT. See the GNU General Public License for more ++ * details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Maintained by: Alok N Kataria <akataria@vmware.com> ++ * ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/types.h> ++#include <linux/interrupt.h> ++#include <linux/workqueue.h> ++#include <linux/pci.h> ++ ++#include <scsi/scsi.h> ++#include <scsi/scsi_host.h> ++#include <scsi/scsi_cmnd.h> ++#include <scsi/scsi_device.h> ++ ++#include "vmw_pvscsi.h" ++ ++#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" ++ ++MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); ++MODULE_AUTHOR("VMware, Inc."); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); ++ ++#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 ++#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 ++#define PVSCSI_DEFAULT_QUEUE_DEPTH 64 ++#define SGL_SIZE PAGE_SIZE ++ ++#define pvscsi_dev(adapter) (&(adapter->dev->dev)) ++ ++struct pvscsi_sg_list { ++ struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; ++}; ++ ++struct pvscsi_ctx { ++ /* ++ * The index of the context in cmd_map serves as the context ID for a ++ * 1-to-1 mapping completions back to requests. ++ */ ++ struct scsi_cmnd *cmd; ++ struct pvscsi_sg_list *sgl; ++ struct list_head list; ++ dma_addr_t dataPA; ++ dma_addr_t sensePA; ++ dma_addr_t sglPA; ++}; ++ ++struct pvscsi_adapter { ++ char *mmioBase; ++ unsigned int irq; ++ u8 rev; ++ bool use_msi; ++ bool use_msix; ++ bool use_msg; ++ ++ spinlock_t hw_lock; ++ ++ struct workqueue_struct *workqueue; ++ struct work_struct work; ++ ++ struct PVSCSIRingReqDesc *req_ring; ++ unsigned req_pages; ++ unsigned req_depth; ++ dma_addr_t reqRingPA; ++ ++ struct PVSCSIRingCmpDesc *cmp_ring; ++ unsigned cmp_pages; ++ dma_addr_t cmpRingPA; ++ ++ struct PVSCSIRingMsgDesc *msg_ring; ++ unsigned msg_pages; ++ dma_addr_t msgRingPA; ++ ++ struct PVSCSIRingsState *rings_state; ++ dma_addr_t ringStatePA; ++ ++ struct pci_dev *dev; ++ struct Scsi_Host *host; ++ ++ struct list_head cmd_pool; ++ struct pvscsi_ctx *cmd_map; ++}; ++ ++ ++/* Command line parameters */ ++static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; ++static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; ++static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; ++static bool pvscsi_disable_msi; ++static bool pvscsi_disable_msix; ++static bool pvscsi_use_msg = true; ++ ++#define PVSCSI_RW (S_IRUSR | S_IWUSR) ++ ++module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); ++MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" ++ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); ++ ++module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); ++MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" ++ __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); ++ ++module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); ++MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" ++ __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")"); ++ ++module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); ++MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); ++ ++module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); ++MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); ++ ++module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW); ++MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)"); ++ ++static const struct pci_device_id pvscsi_pci_tbl[] = { ++ { PCI_VDEVICE(VMWARE, 
PCI_DEVICE_ID_VMWARE_PVSCSI) }, ++ { 0 } ++}; ++ ++MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl); ++ ++static struct pvscsi_ctx * ++pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) ++{ ++ struct pvscsi_ctx *ctx, *end; ++ ++ end = &adapter->cmd_map[adapter->req_depth]; ++ for (ctx = adapter->cmd_map; ctx < end; ctx++) ++ if (ctx->cmd == cmd) ++ return ctx; ++ ++ return NULL; ++} ++ ++static struct pvscsi_ctx * ++pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) ++{ ++ struct pvscsi_ctx *ctx; ++ ++ if (list_empty(&adapter->cmd_pool)) ++ return NULL; ++ ++ ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list); ++ ctx->cmd = cmd; ++ list_del(&ctx->list); ++ ++ return ctx; ++} ++ ++static void pvscsi_release_context(struct pvscsi_adapter *adapter, ++ struct pvscsi_ctx *ctx) ++{ ++ ctx->cmd = NULL; ++ list_add(&ctx->list, &adapter->cmd_pool); ++} ++ ++/* ++ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple ++ * non-zero integer. ctx always points to an entry in cmd_map array, hence ++ * the return value is always >=1. ++ */ ++static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter, ++ const struct pvscsi_ctx *ctx) ++{ ++ return ctx - adapter->cmd_map + 1; ++} ++ ++static struct pvscsi_ctx * ++pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context) ++{ ++ return &adapter->cmd_map[context - 1]; ++} ++ ++static void pvscsi_reg_write(const struct pvscsi_adapter *adapter, ++ u32 offset, u32 val) ++{ ++ writel(val, adapter->mmioBase + offset); ++} ++ ++static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset) ++{ ++ return readl(adapter->mmioBase + offset); ++} ++ ++static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter) ++{ ++ return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS); ++} ++ ++static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter, ++ u32 val) ++{ ++ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val); ++} ++ ++static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter) ++{ ++ u32 intr_bits; ++ ++ intr_bits = PVSCSI_INTR_CMPL_MASK; ++ if (adapter->use_msg) ++ intr_bits |= PVSCSI_INTR_MSG_MASK; ++ ++ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits); ++} ++ ++static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter) ++{ ++ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0); ++} ++ ++static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter, ++ u32 cmd, const void *desc, size_t len) ++{ ++ const u32 *ptr = desc; ++ size_t i; ++ ++ len /= sizeof(*ptr); ++ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd); ++ for (i = 0; i < len; i++) ++ pvscsi_reg_write(adapter, ++ PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]); ++} ++ ++static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter, ++ const struct pvscsi_ctx *ctx) ++{ ++ struct PVSCSICmdDescAbortCmd cmd = { 0 }; ++ ++ cmd.target = ctx->cmd->device->id; ++ cmd.context = pvscsi_map_context(adapter, ctx); ++ ++ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd)); ++} ++ ++static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter) ++{ ++ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0); ++} ++ ++static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter) ++{ ++ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); ++} ++ ++static int scsi_is_rw(unsigned char op) ++{ ++ return op == READ_6 || op == WRITE_6 || ++ op == READ_10 || op 
== WRITE_10 || ++ op == READ_12 || op == WRITE_12 || ++ op == READ_16 || op == WRITE_16; ++} ++ ++static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, ++ unsigned char op) ++{ ++ if (scsi_is_rw(op)) ++ pvscsi_kick_rw_io(adapter); ++ else ++ pvscsi_process_request_ring(adapter); ++} ++ ++static void ll_adapter_reset(const struct pvscsi_adapter *adapter) ++{ ++ dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter); ++ ++ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); ++} ++ ++static void ll_bus_reset(const struct pvscsi_adapter *adapter) ++{ ++ dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter); ++ ++ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); ++} ++ ++static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target) ++{ ++ struct PVSCSICmdDescResetDevice cmd = { 0 }; ++ ++ dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target); ++ ++ cmd.target = target; ++ ++ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE, ++ &cmd, sizeof(cmd)); ++} ++ ++static void pvscsi_create_sg(struct pvscsi_ctx *ctx, ++ struct scatterlist *sg, unsigned count) ++{ ++ unsigned i; ++ struct PVSCSISGElement *sge; ++ ++ BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT); ++ ++ sge = &ctx->sgl->sge[0]; ++ for (i = 0; i < count; i++, sg++) { ++ sge[i].addr = sg_dma_address(sg); ++ sge[i].length = sg_dma_len(sg); ++ sge[i].flags = 0; ++ } ++} ++ ++/* ++ * Map all data buffers for a command into PCI space and ++ * setup the scatter/gather list if needed. ++ */ ++static void pvscsi_map_buffers(struct pvscsi_adapter *adapter, ++ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, ++ struct PVSCSIRingReqDesc *e) ++{ ++ unsigned count; ++ unsigned bufflen = scsi_bufflen(cmd); ++ struct scatterlist *sg; ++ ++ e->dataLen = bufflen; ++ e->dataAddr = 0; ++ if (bufflen == 0) ++ return; ++ ++ sg = scsi_sglist(cmd); ++ count = scsi_sg_count(cmd); ++ if (count != 0) { ++ int segs = scsi_dma_map(cmd); ++ if (segs > 1) { ++ pvscsi_create_sg(ctx, sg, segs); ++ ++ e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; ++ ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl, ++ SGL_SIZE, PCI_DMA_TODEVICE); ++ e->dataAddr = ctx->sglPA; ++ } else ++ e->dataAddr = sg_dma_address(sg); ++ } else { ++ /* ++ * In case there is no S/G list, scsi_sglist points ++ * directly to the buffer. 
++ */ ++ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen, ++ cmd->sc_data_direction); ++ e->dataAddr = ctx->dataPA; ++ } ++} ++ ++static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, ++ struct pvscsi_ctx *ctx) ++{ ++ struct scsi_cmnd *cmd; ++ unsigned bufflen; ++ ++ cmd = ctx->cmd; ++ bufflen = scsi_bufflen(cmd); ++ ++ if (bufflen != 0) { ++ unsigned count = scsi_sg_count(cmd); ++ ++ if (count != 0) { ++ scsi_dma_unmap(cmd); ++ if (ctx->sglPA) { ++ pci_unmap_single(adapter->dev, ctx->sglPA, ++ SGL_SIZE, PCI_DMA_TODEVICE); ++ ctx->sglPA = 0; ++ } ++ } else ++ pci_unmap_single(adapter->dev, ctx->dataPA, bufflen, ++ cmd->sc_data_direction); ++ } ++ if (cmd->sense_buffer) ++ pci_unmap_single(adapter->dev, ctx->sensePA, ++ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); ++} ++ ++static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter) ++{ ++ adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE, ++ &adapter->ringStatePA); ++ if (!adapter->rings_state) ++ return -ENOMEM; ++ ++ adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, ++ pvscsi_ring_pages); ++ adapter->req_depth = adapter->req_pages ++ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; ++ adapter->req_ring = pci_alloc_consistent(adapter->dev, ++ adapter->req_pages * PAGE_SIZE, ++ &adapter->reqRingPA); ++ if (!adapter->req_ring) ++ return -ENOMEM; ++ ++ adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, ++ pvscsi_ring_pages); ++ adapter->cmp_ring = pci_alloc_consistent(adapter->dev, ++ adapter->cmp_pages * PAGE_SIZE, ++ &adapter->cmpRingPA); ++ if (!adapter->cmp_ring) ++ return -ENOMEM; ++ ++ BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE)); ++ BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE)); ++ BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE)); ++ ++ if (!adapter->use_msg) ++ return 0; ++ ++ adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, ++ pvscsi_msg_ring_pages); ++ adapter->msg_ring = pci_alloc_consistent(adapter->dev, ++ adapter->msg_pages * PAGE_SIZE, ++ &adapter->msgRingPA); ++ if (!adapter->msg_ring) ++ return -ENOMEM; ++ BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); ++ ++ return 0; ++} ++ ++static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) ++{ ++ struct PVSCSICmdDescSetupRings cmd = { 0 }; ++ dma_addr_t base; ++ unsigned i; ++ ++ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; ++ cmd.reqRingNumPages = adapter->req_pages; ++ cmd.cmpRingNumPages = adapter->cmp_pages; ++ ++ base = adapter->reqRingPA; ++ for (i = 0; i < adapter->req_pages; i++) { ++ cmd.reqRingPPNs[i] = base >> PAGE_SHIFT; ++ base += PAGE_SIZE; ++ } ++ ++ base = adapter->cmpRingPA; ++ for (i = 0; i < adapter->cmp_pages; i++) { ++ cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT; ++ base += PAGE_SIZE; ++ } ++ ++ memset(adapter->rings_state, 0, PAGE_SIZE); ++ memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE); ++ memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE); ++ ++ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS, ++ &cmd, sizeof(cmd)); ++ ++ if (adapter->use_msg) { ++ struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 }; ++ ++ cmd_msg.numPages = adapter->msg_pages; ++ ++ base = adapter->msgRingPA; ++ for (i = 0; i < adapter->msg_pages; i++) { ++ cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT; ++ base += PAGE_SIZE; ++ } ++ memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE); ++ ++ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING, ++ &cmd_msg, sizeof(cmd_msg)); ++ } ++} ++ ++/* ++ * Pull a completion descriptor off and pass the completion 
back ++ * to the SCSI mid layer. ++ */ ++static void pvscsi_complete_request(struct pvscsi_adapter *adapter, ++ const struct PVSCSIRingCmpDesc *e) ++{ ++ struct pvscsi_ctx *ctx; ++ struct scsi_cmnd *cmd; ++ u32 btstat = e->hostStatus; ++ u32 sdstat = e->scsiStatus; ++ ++ ctx = pvscsi_get_context(adapter, e->context); ++ cmd = ctx->cmd; ++ pvscsi_unmap_buffers(adapter, ctx); ++ pvscsi_release_context(adapter, ctx); ++ cmd->result = 0; ++ ++ if (sdstat != SAM_STAT_GOOD && ++ (btstat == BTSTAT_SUCCESS || ++ btstat == BTSTAT_LINKED_COMMAND_COMPLETED || ++ btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { ++ cmd->result = (DID_OK << 16) | sdstat; ++ if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) ++ cmd->result |= (DRIVER_SENSE << 24); ++ } else ++ switch (btstat) { ++ case BTSTAT_SUCCESS: ++ case BTSTAT_LINKED_COMMAND_COMPLETED: ++ case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: ++ /* If everything went fine, let's move on.. */ ++ cmd->result = (DID_OK << 16); ++ break; ++ ++ case BTSTAT_DATARUN: ++ case BTSTAT_DATA_UNDERRUN: ++ /* Report residual data in underruns */ ++ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); ++ cmd->result = (DID_ERROR << 16); ++ break; ++ ++ case BTSTAT_SELTIMEO: ++ /* Our emulation returns this for non-connected devs */ ++ cmd->result = (DID_BAD_TARGET << 16); ++ break; ++ ++ case BTSTAT_LUNMISMATCH: ++ case BTSTAT_TAGREJECT: ++ case BTSTAT_BADMSG: ++ cmd->result = (DRIVER_INVALID << 24); ++ /* fall through */ ++ ++ case BTSTAT_HAHARDWARE: ++ case BTSTAT_INVPHASE: ++ case BTSTAT_HATIMEOUT: ++ case BTSTAT_NORESPONSE: ++ case BTSTAT_DISCONNECT: ++ case BTSTAT_HASOFTWARE: ++ case BTSTAT_BUSFREE: ++ case BTSTAT_SENSFAILED: ++ cmd->result |= (DID_ERROR << 16); ++ break; ++ ++ case BTSTAT_SENTRST: ++ case BTSTAT_RECVRST: ++ case BTSTAT_BUSRESET: ++ cmd->result = (DID_RESET << 16); ++ break; ++ ++ case BTSTAT_ABORTQUEUE: ++ cmd->result = (DID_ABORT << 16); ++ break; ++ ++ case BTSTAT_SCSIPARITY: ++ cmd->result = (DID_PARITY << 16); ++ break; ++ ++ default: ++ cmd->result = (DID_ERROR << 16); ++ scmd_printk(KERN_DEBUG, cmd, ++ "Unknown completion status: 0x%x\n", ++ btstat); ++ } ++ ++ dev_dbg(&cmd->device->sdev_gendev, ++ "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n", ++ cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); ++ ++ cmd->scsi_done(cmd); ++} ++ ++/* ++ * barrier usage : Since the PVSCSI device is emulated, there could be cases ++ * where we may want to serialize some accesses between the driver and the ++ * emulation layer. We use compiler barriers instead of the more expensive ++ * memory barriers because PVSCSI is only supported on X86 which has strong ++ * memory access ordering. ++ */ ++static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter) ++{ ++ struct PVSCSIRingsState *s = adapter->rings_state; ++ struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring; ++ u32 cmp_entries = s->cmpNumEntriesLog2; ++ ++ while (s->cmpConsIdx != s->cmpProdIdx) { ++ struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx & ++ MASK(cmp_entries)); ++ /* ++ * This barrier() ensures that *e is not dereferenced while ++ * the device emulation still writes data into the slot. ++ * Since the device emulation advances s->cmpProdIdx only after ++ * updating the slot we want to check it first. ++ */ ++ barrier(); ++ pvscsi_complete_request(adapter, e); ++ /* ++ * This barrier() ensures that compiler doesn't reorder write ++ * to s->cmpConsIdx before the read of (*e) inside ++ * pvscsi_complete_request. 
Otherwise, device emulation may ++ * overwrite *e before we had a chance to read it. ++ */ ++ barrier(); ++ s->cmpConsIdx++; ++ } ++} ++ ++/* ++ * Translate a Linux SCSI request into a request ring entry. ++ */ ++static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, ++ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd) ++{ ++ struct PVSCSIRingsState *s; ++ struct PVSCSIRingReqDesc *e; ++ struct scsi_device *sdev; ++ u32 req_entries; ++ ++ s = adapter->rings_state; ++ sdev = cmd->device; ++ req_entries = s->reqNumEntriesLog2; ++ ++ /* ++ * If this condition holds, we might have room on the request ring, but ++ * we might not have room on the completion ring for the response. ++ * However, we have already ruled out this possibility - we would not ++ * have successfully allocated a context if it were true, since we only ++ * have one context per request entry. Check for it anyway, since it ++ * would be a serious bug. ++ */ ++ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { ++ scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: " ++ "ring full: reqProdIdx=%d cmpConsIdx=%d\n", ++ s->reqProdIdx, s->cmpConsIdx); ++ return -1; ++ } ++ ++ e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); ++ ++ e->bus = sdev->channel; ++ e->target = sdev->id; ++ memset(e->lun, 0, sizeof(e->lun)); ++ e->lun[1] = sdev->lun; ++ ++ if (cmd->sense_buffer) { ++ ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, ++ SCSI_SENSE_BUFFERSIZE, ++ PCI_DMA_FROMDEVICE); ++ e->senseAddr = ctx->sensePA; ++ e->senseLen = SCSI_SENSE_BUFFERSIZE; ++ } else { ++ e->senseLen = 0; ++ e->senseAddr = 0; ++ } ++ e->cdbLen = cmd->cmd_len; ++ e->vcpuHint = smp_processor_id(); ++ memcpy(e->cdb, cmd->cmnd, e->cdbLen); ++ ++ e->tag = SIMPLE_QUEUE_TAG; ++ if (sdev->tagged_supported && ++ (cmd->tag == HEAD_OF_QUEUE_TAG || ++ cmd->tag == ORDERED_QUEUE_TAG)) ++ e->tag = cmd->tag; ++ ++ if (cmd->sc_data_direction == DMA_FROM_DEVICE) ++ e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; ++ else if (cmd->sc_data_direction == DMA_TO_DEVICE) ++ e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; ++ else if (cmd->sc_data_direction == DMA_NONE) ++ e->flags = PVSCSI_FLAG_CMD_DIR_NONE; ++ else ++ e->flags = 0; ++ ++ pvscsi_map_buffers(adapter, ctx, cmd, e); ++ ++ e->context = pvscsi_map_context(adapter, ctx); ++ ++ barrier(); ++ ++ s->reqProdIdx++; ++ ++ return 0; ++} ++ ++static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) ++{ ++ struct Scsi_Host *host = cmd->device->host; ++ struct pvscsi_adapter *adapter = shost_priv(host); ++ struct pvscsi_ctx *ctx; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&adapter->hw_lock, flags); ++ ++ ctx = pvscsi_acquire_context(adapter, cmd); ++ if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) { ++ if (ctx) ++ pvscsi_release_context(adapter, ctx); ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } ++ ++ cmd->scsi_done = done; ++ ++ dev_dbg(&cmd->device->sdev_gendev, ++ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); ++ ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ ++ pvscsi_kick_io(adapter, cmd->cmnd[0]); ++ ++ return 0; ++} ++ ++static int pvscsi_abort(struct scsi_cmnd *cmd) ++{ ++ struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); ++ struct pvscsi_ctx *ctx; ++ unsigned long flags; ++ ++ scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", ++ adapter->host->host_no, cmd); ++ ++ spin_lock_irqsave(&adapter->hw_lock, flags); ++ ++ /* ++ * Poll the completion ring first - we might be trying to abort ++ * a command 
that is waiting to be dispatched in the completion ring. ++ */ ++ pvscsi_process_completion_ring(adapter); ++ ++ /* ++ * If there is no context for the command, it either already succeeded ++ * or else was never properly issued. Not our problem. ++ */ ++ ctx = pvscsi_find_context(adapter, cmd); ++ if (!ctx) { ++ scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd); ++ goto out; ++ } ++ ++ pvscsi_abort_cmd(adapter, ctx); ++ ++ pvscsi_process_completion_ring(adapter); ++ ++out: ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ return SUCCESS; ++} ++ ++/* ++ * Abort all outstanding requests. This is only safe to use if the completion ++ * ring will never be walked again or the device has been reset, because it ++ * destroys the 1-1 mapping between context field passed to emulation and our ++ * request structure. ++ */ ++static void pvscsi_reset_all(struct pvscsi_adapter *adapter) ++{ ++ unsigned i; ++ ++ for (i = 0; i < adapter->req_depth; i++) { ++ struct pvscsi_ctx *ctx = &adapter->cmd_map[i]; ++ struct scsi_cmnd *cmd = ctx->cmd; ++ if (cmd) { ++ scmd_printk(KERN_ERR, cmd, ++ "Forced reset on cmd %p\n", cmd); ++ pvscsi_unmap_buffers(adapter, ctx); ++ pvscsi_release_context(adapter, ctx); ++ cmd->result = (DID_RESET << 16); ++ cmd->scsi_done(cmd); ++ } ++ } ++} ++ ++static int pvscsi_host_reset(struct scsi_cmnd *cmd) ++{ ++ struct Scsi_Host *host = cmd->device->host; ++ struct pvscsi_adapter *adapter = shost_priv(host); ++ unsigned long flags; ++ bool use_msg; ++ ++ scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n"); ++ ++ spin_lock_irqsave(&adapter->hw_lock, flags); ++ ++ use_msg = adapter->use_msg; ++ ++ if (use_msg) { ++ adapter->use_msg = 0; ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ ++ /* ++ * Now that we know that the ISR won't add more work on the ++ * workqueue we can safely flush any outstanding work. ++ */ ++ flush_workqueue(adapter->workqueue); ++ spin_lock_irqsave(&adapter->hw_lock, flags); ++ } ++ ++ /* ++ * We're going to tear down the entire ring structure and set it back ++ * up, so stalling new requests until all completions are flushed and ++ * the rings are back in place. ++ */ ++ ++ pvscsi_process_request_ring(adapter); ++ ++ ll_adapter_reset(adapter); ++ ++ /* ++ * Now process any completions. Note we do this AFTER adapter reset, ++ * which is strange, but stops races where completions get posted ++ * between processing the ring and issuing the reset. The backend will ++ * not touch the ring memory after reset, so the immediately pre-reset ++ * completion ring state is still valid. ++ */ ++ pvscsi_process_completion_ring(adapter); ++ ++ pvscsi_reset_all(adapter); ++ adapter->use_msg = use_msg; ++ pvscsi_setup_all_rings(adapter); ++ pvscsi_unmask_intr(adapter); ++ ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ ++ return SUCCESS; ++} ++ ++static int pvscsi_bus_reset(struct scsi_cmnd *cmd) ++{ ++ struct Scsi_Host *host = cmd->device->host; ++ struct pvscsi_adapter *adapter = shost_priv(host); ++ unsigned long flags; ++ ++ scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n"); ++ ++ /* ++ * We don't want to queue new requests for this bus after ++ * flushing all pending requests to emulation, since new ++ * requests could then sneak in during this bus reset phase, ++ * so take the lock now. 
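
The queueing and completion paths above lean on one piece of arithmetic: reqProdIdx and cmpConsIdx are free-running 32-bit counters that are never reduced modulo the ring size; a slot is picked by masking the index with MASK(numEntriesLog2), and "ring full" is the unsigned-subtraction test reqProdIdx - cmpConsIdx >= 1 << numEntriesLog2, which stays correct across wraparound. A minimal user-space model of that idiom (all names here are illustrative, not the driver's):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MASK(n) ((1u << (n)) - 1)	/* same idiom as the vmw_pvscsi.h header below */

    int main(void)
    {
    	enum { LOG2_ENTRIES = 3 };	/* 8-slot demo ring */
    	uint32_t ring[1u << LOG2_ENTRIES];
    	uint32_t prod = 0, cons = 0;	/* free-running, never wrapped by hand */

    	/* Produce until the wraparound-safe fullness test trips. */
    	while (prod - cons < (1u << LOG2_ENTRIES)) {
    		ring[prod & MASK(LOG2_ENTRIES)] = prod;	/* mask only to index */
    		prod++;
    	}
    	assert(prod - cons == (1u << LOG2_ENTRIES));	/* ring is full */

    	/* Drain; this works even after prod and cons wrap past 2^32. */
    	while (cons != prod) {
    		printf("slot %u holds %u\n",
    		       cons & MASK(LOG2_ENTRIES),
    		       ring[cons & MASK(LOG2_ENTRIES)]);
    		cons++;
    	}
    	return 0;
    }

Because the indices are only ever subtracted or masked, no explicit wrap handling is needed anywhere on these paths.
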
++ */ ++ spin_lock_irqsave(&adapter->hw_lock, flags); ++ ++ pvscsi_process_request_ring(adapter); ++ ll_bus_reset(adapter); ++ pvscsi_process_completion_ring(adapter); ++ ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ ++ return SUCCESS; ++} ++ ++static int pvscsi_device_reset(struct scsi_cmnd *cmd) ++{ ++ struct Scsi_Host *host = cmd->device->host; ++ struct pvscsi_adapter *adapter = shost_priv(host); ++ unsigned long flags; ++ ++ scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n", ++ host->host_no, cmd->device->id); ++ ++ /* ++ * We don't want to queue new requests for this device after flushing ++ * all pending requests to emulation, since new requests could then ++ * sneak in during this device reset phase, so take the lock now. ++ */ ++ spin_lock_irqsave(&adapter->hw_lock, flags); ++ ++ pvscsi_process_request_ring(adapter); ++ ll_device_reset(adapter, cmd->device->id); ++ pvscsi_process_completion_ring(adapter); ++ ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ ++ return SUCCESS; ++} ++ ++static struct scsi_host_template pvscsi_template; ++ ++static const char *pvscsi_info(struct Scsi_Host *host) ++{ ++ struct pvscsi_adapter *adapter = shost_priv(host); ++ static char buf[256]; ++ ++ sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: " ++ "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, ++ adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, ++ pvscsi_template.cmd_per_lun); ++ ++ return buf; ++} ++ ++static struct scsi_host_template pvscsi_template = { ++ .module = THIS_MODULE, ++ .name = "VMware PVSCSI Host Adapter", ++ .proc_name = "vmw_pvscsi", ++ .info = pvscsi_info, ++ .queuecommand = pvscsi_queue, ++ .this_id = -1, ++ .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT, ++ .dma_boundary = UINT_MAX, ++ .max_sectors = 0xffff, ++ .use_clustering = ENABLE_CLUSTERING, ++ .eh_abort_handler = pvscsi_abort, ++ .eh_device_reset_handler = pvscsi_device_reset, ++ .eh_bus_reset_handler = pvscsi_bus_reset, ++ .eh_host_reset_handler = pvscsi_host_reset, ++}; ++ ++static void pvscsi_process_msg(const struct pvscsi_adapter *adapter, ++ const struct PVSCSIRingMsgDesc *e) ++{ ++ struct PVSCSIRingsState *s = adapter->rings_state; ++ struct Scsi_Host *host = adapter->host; ++ struct scsi_device *sdev; ++ ++ printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n", ++ e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2); ++ ++ BUILD_BUG_ON(PVSCSI_MSG_LAST != 2); ++ ++ if (e->type == PVSCSI_MSG_DEV_ADDED) { ++ struct PVSCSIMsgDescDevStatusChanged *desc; ++ desc = (struct PVSCSIMsgDescDevStatusChanged *)e; ++ ++ printk(KERN_INFO ++ "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n", ++ desc->bus, desc->target, desc->lun[1]); ++ ++ if (!scsi_host_get(host)) ++ return; ++ ++ sdev = scsi_device_lookup(host, desc->bus, desc->target, ++ desc->lun[1]); ++ if (sdev) { ++ printk(KERN_INFO "vmw_pvscsi: device already exists\n"); ++ scsi_device_put(sdev); ++ } else ++ scsi_add_device(adapter->host, desc->bus, ++ desc->target, desc->lun[1]); ++ ++ scsi_host_put(host); ++ } else if (e->type == PVSCSI_MSG_DEV_REMOVED) { ++ struct PVSCSIMsgDescDevStatusChanged *desc; ++ desc = (struct PVSCSIMsgDescDevStatusChanged *)e; ++ ++ printk(KERN_INFO ++ "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n", ++ desc->bus, desc->target, desc->lun[1]); ++ ++ if (!scsi_host_get(host)) ++ return; ++ ++ sdev = scsi_device_lookup(host, desc->bus, desc->target, ++ desc->lun[1]); ++ if (sdev) { ++ scsi_remove_device(sdev); ++ scsi_device_put(sdev); ++ } 
else ++ printk(KERN_INFO ++ "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n", ++ desc->bus, desc->target, desc->lun[1]); ++ ++ scsi_host_put(host); ++ } ++} ++ ++static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter) ++{ ++ struct PVSCSIRingsState *s = adapter->rings_state; ++ ++ return s->msgProdIdx != s->msgConsIdx; ++} ++ ++static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter) ++{ ++ struct PVSCSIRingsState *s = adapter->rings_state; ++ struct PVSCSIRingMsgDesc *ring = adapter->msg_ring; ++ u32 msg_entries = s->msgNumEntriesLog2; ++ ++ while (pvscsi_msg_pending(adapter)) { ++ struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx & ++ MASK(msg_entries)); ++ ++ barrier(); ++ pvscsi_process_msg(adapter, e); ++ barrier(); ++ s->msgConsIdx++; ++ } ++} ++ ++static void pvscsi_msg_workqueue_handler(struct work_struct *data) ++{ ++ struct pvscsi_adapter *adapter; ++ ++ adapter = container_of(data, struct pvscsi_adapter, work); ++ ++ pvscsi_process_msg_ring(adapter); ++} ++ ++static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter) ++{ ++ char name[32]; ++ ++ if (!pvscsi_use_msg) ++ return 0; ++ ++ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, ++ PVSCSI_CMD_SETUP_MSG_RING); ++ ++ if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1) ++ return 0; ++ ++ snprintf(name, sizeof(name), ++ "vmw_pvscsi_wq_%u", adapter->host->host_no); ++ ++ adapter->workqueue = create_singlethread_workqueue(name); ++ if (!adapter->workqueue) { ++ printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n"); ++ return 0; ++ } ++ INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler); ++ ++ return 1; ++} ++ ++static irqreturn_t pvscsi_isr(int irq, void *devp) ++{ ++ struct pvscsi_adapter *adapter = devp; ++ int handled; ++ ++ if (adapter->use_msi || adapter->use_msix) ++ handled = true; ++ else { ++ u32 val = pvscsi_read_intr_status(adapter); ++ handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0; ++ if (handled) ++ pvscsi_write_intr_status(devp, val); ++ } ++ ++ if (handled) { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&adapter->hw_lock, flags); ++ ++ pvscsi_process_completion_ring(adapter); ++ if (adapter->use_msg && pvscsi_msg_pending(adapter)) ++ queue_work(adapter->workqueue, &adapter->work); ++ ++ spin_unlock_irqrestore(&adapter->hw_lock, flags); ++ } ++ ++ return IRQ_RETVAL(handled); ++} ++ ++static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) ++{ ++ struct pvscsi_ctx *ctx = adapter->cmd_map; ++ unsigned i; ++ ++ for (i = 0; i < adapter->req_depth; ++i, ++ctx) ++ kfree(ctx->sgl); ++} ++ ++static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq) ++{ ++ struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; ++ int ret; ++ ++ ret = pci_enable_msix(adapter->dev, &entry, 1); ++ if (ret) ++ return ret; ++ ++ *irq = entry.vector; ++ ++ return 0; ++} ++ ++static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) ++{ ++ if (adapter->irq) { ++ free_irq(adapter->irq, adapter); ++ adapter->irq = 0; ++ } ++ if (adapter->use_msi) { ++ pci_disable_msi(adapter->dev); ++ adapter->use_msi = 0; ++ } else if (adapter->use_msix) { ++ pci_disable_msix(adapter->dev); ++ adapter->use_msix = 0; ++ } ++} ++ ++static void pvscsi_release_resources(struct pvscsi_adapter *adapter) ++{ ++ pvscsi_shutdown_intr(adapter); ++ ++ if (adapter->workqueue) ++ destroy_workqueue(adapter->workqueue); ++ ++ if (adapter->mmioBase) ++ pci_iounmap(adapter->dev, adapter->mmioBase); ++ ++ pci_release_regions(adapter->dev); ++ ++ if (adapter->cmd_map) 
{ ++ pvscsi_free_sgls(adapter); ++ kfree(adapter->cmd_map); ++ } ++ ++ if (adapter->rings_state) ++ pci_free_consistent(adapter->dev, PAGE_SIZE, ++ adapter->rings_state, adapter->ringStatePA); ++ ++ if (adapter->req_ring) ++ pci_free_consistent(adapter->dev, ++ adapter->req_pages * PAGE_SIZE, ++ adapter->req_ring, adapter->reqRingPA); ++ ++ if (adapter->cmp_ring) ++ pci_free_consistent(adapter->dev, ++ adapter->cmp_pages * PAGE_SIZE, ++ adapter->cmp_ring, adapter->cmpRingPA); ++ ++ if (adapter->msg_ring) ++ pci_free_consistent(adapter->dev, ++ adapter->msg_pages * PAGE_SIZE, ++ adapter->msg_ring, adapter->msgRingPA); ++} ++ ++/* ++ * Allocate scatter gather lists. ++ * ++ * These are statically allocated. Trying to be clever was not worth it. ++ * ++ * Dynamic allocation can fail, and we can't go deeep into the memory ++ * allocator, since we're a SCSI driver, and trying too hard to allocate ++ * memory might generate disk I/O. We also don't want to fail disk I/O ++ * in that case because we can't get an allocation - the I/O could be ++ * trying to swap out data to free memory. Since that is pathological, ++ * just use a statically allocated scatter list. ++ * ++ */ ++static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter) ++{ ++ struct pvscsi_ctx *ctx; ++ int i; ++ ++ ctx = adapter->cmd_map; ++ BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE); ++ ++ for (i = 0; i < adapter->req_depth; ++i, ++ctx) { ++ ctx->sgl = kmalloc(SGL_SIZE, GFP_KERNEL); ++ ctx->sglPA = 0; ++ BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); ++ if (!ctx->sgl) { ++ for (; i >= 0; --i, --ctx) { ++ kfree(ctx->sgl); ++ ctx->sgl = NULL; ++ } ++ return -ENOMEM; ++ } ++ } ++ ++ return 0; ++} ++ ++static int __devinit pvscsi_probe(struct pci_dev *pdev, ++ const struct pci_device_id *id) ++{ ++ struct pvscsi_adapter *adapter; ++ struct Scsi_Host *host; ++ unsigned int i; ++ int error; ++ ++ error = -ENODEV; ++ ++ if (pci_enable_device(pdev)) ++ return error; ++ ++ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 && ++ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { ++ printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); ++ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 && ++ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) { ++ printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); ++ } else { ++ printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); ++ goto out_disable_device; ++ } ++ ++ pvscsi_template.can_queue = ++ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * ++ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; ++ pvscsi_template.cmd_per_lun = ++ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); ++ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); ++ if (!host) { ++ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); ++ goto out_disable_device; ++ } ++ ++ adapter = shost_priv(host); ++ memset(adapter, 0, sizeof(*adapter)); ++ adapter->dev = pdev; ++ adapter->host = host; ++ ++ spin_lock_init(&adapter->hw_lock); ++ ++ host->max_channel = 0; ++ host->max_id = 16; ++ host->max_lun = 1; ++ host->max_cmd_len = 16; ++ ++ adapter->rev = pdev->revision; ++ ++ if (pci_request_regions(pdev, "vmw_pvscsi")) { ++ printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n"); ++ goto out_free_host; ++ } ++ ++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { ++ if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)) ++ continue; ++ ++ if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE) ++ continue; ++ ++ break; ++ } ++ ++ if (i == 
DEVICE_COUNT_RESOURCE) { ++ printk(KERN_ERR ++ "vmw_pvscsi: adapter has no suitable MMIO region\n"); ++ goto out_release_resources; ++ } ++ ++ adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); ++ ++ if (!adapter->mmioBase) { ++ printk(KERN_ERR ++ "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n", ++ i, PVSCSI_MEM_SPACE_SIZE); ++ goto out_release_resources; ++ } ++ ++ pci_set_master(pdev); ++ pci_set_drvdata(pdev, host); ++ ++ ll_adapter_reset(adapter); ++ ++ adapter->use_msg = pvscsi_setup_msg_workqueue(adapter); ++ ++ error = pvscsi_allocate_rings(adapter); ++ if (error) { ++ printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n"); ++ goto out_release_resources; ++ } ++ ++ /* ++ * From this point on we should reset the adapter if anything goes ++ * wrong. ++ */ ++ pvscsi_setup_all_rings(adapter); ++ ++ adapter->cmd_map = kcalloc(adapter->req_depth, ++ sizeof(struct pvscsi_ctx), GFP_KERNEL); ++ if (!adapter->cmd_map) { ++ printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n"); ++ error = -ENOMEM; ++ goto out_reset_adapter; ++ } ++ ++ INIT_LIST_HEAD(&adapter->cmd_pool); ++ for (i = 0; i < adapter->req_depth; i++) { ++ struct pvscsi_ctx *ctx = adapter->cmd_map + i; ++ list_add(&ctx->list, &adapter->cmd_pool); ++ } ++ ++ error = pvscsi_allocate_sg(adapter); ++ if (error) { ++ printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n"); ++ goto out_reset_adapter; ++ } ++ ++ if (!pvscsi_disable_msix && ++ pvscsi_setup_msix(adapter, &adapter->irq) == 0) { ++ printk(KERN_INFO "vmw_pvscsi: using MSI-X\n"); ++ adapter->use_msix = 1; ++ } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) { ++ printk(KERN_INFO "vmw_pvscsi: using MSI\n"); ++ adapter->use_msi = 1; ++ adapter->irq = pdev->irq; ++ } else { ++ printk(KERN_INFO "vmw_pvscsi: using INTx\n"); ++ adapter->irq = pdev->irq; ++ } ++ ++ error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED, ++ "vmw_pvscsi", adapter); ++ if (error) { ++ printk(KERN_ERR ++ "vmw_pvscsi: unable to request IRQ: %d\n", error); ++ adapter->irq = 0; ++ goto out_reset_adapter; ++ } ++ ++ error = scsi_add_host(host, &pdev->dev); ++ if (error) { ++ printk(KERN_ERR ++ "vmw_pvscsi: scsi_add_host failed: %d\n", error); ++ goto out_reset_adapter; ++ } ++ ++ dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n", ++ adapter->rev, host->host_no); ++ ++ pvscsi_unmask_intr(adapter); ++ ++ scsi_scan_host(host); ++ ++ return 0; ++ ++out_reset_adapter: ++ ll_adapter_reset(adapter); ++out_release_resources: ++ pvscsi_release_resources(adapter); ++out_free_host: ++ scsi_host_put(host); ++out_disable_device: ++ pci_set_drvdata(pdev, NULL); ++ pci_disable_device(pdev); ++ ++ return error; ++} ++ ++static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) ++{ ++ pvscsi_mask_intr(adapter); ++ ++ if (adapter->workqueue) ++ flush_workqueue(adapter->workqueue); ++ ++ pvscsi_shutdown_intr(adapter); ++ ++ pvscsi_process_request_ring(adapter); ++ pvscsi_process_completion_ring(adapter); ++ ll_adapter_reset(adapter); ++} ++ ++static void pvscsi_shutdown(struct pci_dev *dev) ++{ ++ struct Scsi_Host *host = pci_get_drvdata(dev); ++ struct pvscsi_adapter *adapter = shost_priv(host); ++ ++ __pvscsi_shutdown(adapter); ++} ++ ++static void pvscsi_remove(struct pci_dev *pdev) ++{ ++ struct Scsi_Host *host = pci_get_drvdata(pdev); ++ struct pvscsi_adapter *adapter = shost_priv(host); ++ ++ scsi_remove_host(host); ++ ++ __pvscsi_shutdown(adapter); ++ pvscsi_release_resources(adapter); ++ ++ scsi_host_put(host); ++ ++ pci_set_drvdata(pdev, NULL); ++ 
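
pvscsi_probe above uses the standard kernel unwind ladder: each failure jumps to the label that releases exactly what has been acquired so far, in reverse order, so there is a single well-audited error path. A compressed stand-alone sketch of the shape (the resource names are hypothetical stand-ins for the rings, cmd_map, and IRQ):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical acquire/release pairs for the demo. */
    static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
    static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

    static int probe(void)
    {
    	void *rings, *cmd_map, *irq;

    	rings = acquire("rings");
    	if (!rings)
    		goto out;
    	cmd_map = acquire("cmd_map");
    	if (!cmd_map)
    		goto out_free_rings;
    	irq = acquire("irq");
    	if (!irq)
    		goto out_free_cmd_map;
    	return 0;	/* success: everything stays held, as in a real probe */

    	/* Labels run in reverse acquisition order; each one cleans up
    	 * only what is known to exist at that point. */
    out_free_cmd_map:
    	release("cmd_map", cmd_map);
    out_free_rings:
    	release("rings", rings);
    out:
    	return -1;
    }

    int main(void)
    {
    	return probe() ? 1 : 0;
    }
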
pci_disable_device(pdev); ++} ++ ++static struct pci_driver pvscsi_pci_driver = { ++ .name = "vmw_pvscsi", ++ .id_table = pvscsi_pci_tbl, ++ .probe = pvscsi_probe, ++ .remove = __devexit_p(pvscsi_remove), ++ .shutdown = pvscsi_shutdown, ++}; ++ ++static int __init pvscsi_init(void) ++{ ++ pr_info("%s - version %s\n", ++ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING); ++ return pci_register_driver(&pvscsi_pci_driver); ++} ++ ++static void __exit pvscsi_exit(void) ++{ ++ pci_unregister_driver(&pvscsi_pci_driver); ++} ++ ++module_init(pvscsi_init); ++module_exit(pvscsi_exit); +diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h +new file mode 100644 +index 0000000..62e36e7 +--- /dev/null ++++ b/drivers/scsi/vmw_pvscsi.h +@@ -0,0 +1,397 @@ ++/* ++ * VMware PVSCSI header file ++ * ++ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; version 2 of the License and no later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or ++ * NON INFRINGEMENT. See the GNU General Public License for more ++ * details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Maintained by: Alok N Kataria <akataria@vmware.com> ++ * ++ */ ++ ++#ifndef _VMW_PVSCSI_H_ ++#define _VMW_PVSCSI_H_ ++ ++#include <linux/types.h> ++ ++#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k" ++ ++#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 ++ ++#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ ++ ++#define PCI_VENDOR_ID_VMWARE 0x15AD ++#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0 ++ ++/* ++ * host adapter status/error codes ++ */ ++enum HostBusAdapterStatus { ++ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */ ++ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, ++ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, ++ BTSTAT_DATA_UNDERRUN = 0x0c, ++ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */ ++ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */ ++ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */ ++ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */ ++ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */ ++ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */ ++ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */ ++ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */ ++ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */ ++ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */ ++ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */ ++ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */ ++ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */ ++ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */ ++ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */ ++ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */ ++ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */ ++ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error 
detected */ ++}; ++ ++/* ++ * Register offsets. ++ * ++ * These registers are accessible both via i/o space and mm i/o. ++ */ ++ ++enum PVSCSIRegOffset { ++ PVSCSI_REG_OFFSET_COMMAND = 0x0, ++ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4, ++ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8, ++ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100, ++ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104, ++ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108, ++ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c, ++ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c, ++ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010, ++ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014, ++ PVSCSI_REG_OFFSET_DEBUG = 0x3018, ++ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018, ++}; ++ ++/* ++ * Virtual h/w commands. ++ */ ++ ++enum PVSCSICommands { ++ PVSCSI_CMD_FIRST = 0, /* has to be first */ ++ ++ PVSCSI_CMD_ADAPTER_RESET = 1, ++ PVSCSI_CMD_ISSUE_SCSI = 2, ++ PVSCSI_CMD_SETUP_RINGS = 3, ++ PVSCSI_CMD_RESET_BUS = 4, ++ PVSCSI_CMD_RESET_DEVICE = 5, ++ PVSCSI_CMD_ABORT_CMD = 6, ++ PVSCSI_CMD_CONFIG = 7, ++ PVSCSI_CMD_SETUP_MSG_RING = 8, ++ PVSCSI_CMD_DEVICE_UNPLUG = 9, ++ ++ PVSCSI_CMD_LAST = 10 /* has to be last */ ++}; ++ ++/* ++ * Command descriptor for PVSCSI_CMD_RESET_DEVICE -- ++ */ ++ ++struct PVSCSICmdDescResetDevice { ++ u32 target; ++ u8 lun[8]; ++} __packed; ++ ++/* ++ * Command descriptor for PVSCSI_CMD_ABORT_CMD -- ++ * ++ * - currently does not support specifying the LUN. ++ * - _pad should be 0. ++ */ ++ ++struct PVSCSICmdDescAbortCmd { ++ u64 context; ++ u32 target; ++ u32 _pad; ++} __packed; ++ ++/* ++ * Command descriptor for PVSCSI_CMD_SETUP_RINGS -- ++ * ++ * Notes: ++ * - reqRingNumPages and cmpRingNumPages need to be power of two. ++ * - reqRingNumPages and cmpRingNumPages need to be different from 0, ++ * - reqRingNumPages and cmpRingNumPages need to be inferior to ++ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES. ++ */ ++ ++#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32 ++struct PVSCSICmdDescSetupRings { ++ u32 reqRingNumPages; ++ u32 cmpRingNumPages; ++ u64 ringsStatePPN; ++ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; ++ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; ++} __packed; ++ ++/* ++ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING -- ++ * ++ * Notes: ++ * - this command was not supported in the initial revision of the h/w ++ * interface. Before using it, you need to check that it is supported by ++ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then ++ * immediately after read the 'command status' register: ++ * * a value of -1 means that the cmd is NOT supported, ++ * * a value != -1 means that the cmd IS supported. ++ * If it's supported the 'command status' register should return: ++ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32). ++ * - this command should be issued _after_ the usual SETUP_RINGS so that the ++ * RingsState page is already setup. If not, the command is a nop. ++ * - numPages needs to be a power of two, ++ * - numPages needs to be different from 0, ++ * - _pad should be zero. ++ */ ++ ++#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16 ++ ++struct PVSCSICmdDescSetupMsgRing { ++ u32 numPages; ++ u32 _pad; ++ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES]; ++} __packed; ++ ++enum PVSCSIMsgType { ++ PVSCSI_MSG_DEV_ADDED = 0, ++ PVSCSI_MSG_DEV_REMOVED = 1, ++ PVSCSI_MSG_LAST = 2, ++}; ++ ++/* ++ * Msg descriptor. ++ * ++ * sizeof(struct PVSCSIRingMsgDesc) == 128. ++ * ++ * - type is of type enum PVSCSIMsgType. ++ * - the content of args depend on the type of event being delivered. 
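
The header note above spells out how a driver discovers whether the device understands PVSCSI_CMD_SETUP_MSG_RING: write the command number to the command register, then read the command-status register; -1 means the command is unsupported, anything else is the descriptor size in 32-bit words. pvscsi_setup_msg_workqueue earlier in this patch does exactly this. A user-space model of the handshake (the two-word register file is a stand-in, not the real MMIO layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in register file; a real driver would use ioread32/iowrite32. */
    static uint32_t regs[2];
    #define REG_COMMAND        0
    #define REG_COMMAND_STATUS 1

    #define CMD_SETUP_MSG_RING 8

    /* Pretend device: it knows SETUP_MSG_RING and answers with the
     * descriptor size in 32-bit words; unknown commands get -1. */
    static void device_handle_command(void)
    {
    	if (regs[REG_COMMAND] == CMD_SETUP_MSG_RING)
    		regs[REG_COMMAND_STATUS] = 34; /* sizeof(PVSCSICmdDescSetupMsgRing)/sizeof(u32) */
    	else
    		regs[REG_COMMAND_STATUS] = (uint32_t)-1;
    }

    static int msg_ring_supported(void)
    {
    	regs[REG_COMMAND] = CMD_SETUP_MSG_RING;
    	device_handle_command(); /* models the emulation reacting to the write */
    	return regs[REG_COMMAND_STATUS] != (uint32_t)-1;
    }

    int main(void)
    {
    	printf("msg ring %ssupported\n", msg_ring_supported() ? "" : "not ");
    	return 0;
    }
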
++ */ ++ ++struct PVSCSIRingMsgDesc { ++ u32 type; ++ u32 args[31]; ++} __packed; ++ ++struct PVSCSIMsgDescDevStatusChanged { ++ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */ ++ u32 bus; ++ u32 target; ++ u8 lun[8]; ++ u32 pad[27]; ++} __packed; ++ ++/* ++ * Rings state. ++ * ++ * - the fields: ++ * . msgProdIdx, ++ * . msgConsIdx, ++ * . msgNumEntriesLog2, ++ * .. are only used once the SETUP_MSG_RING cmd has been issued. ++ * - '_pad' helps to ensure that the msg related fields are on their own ++ * cache-line. ++ */ ++ ++struct PVSCSIRingsState { ++ u32 reqProdIdx; ++ u32 reqConsIdx; ++ u32 reqNumEntriesLog2; ++ ++ u32 cmpProdIdx; ++ u32 cmpConsIdx; ++ u32 cmpNumEntriesLog2; ++ ++ u8 _pad[104]; ++ ++ u32 msgProdIdx; ++ u32 msgConsIdx; ++ u32 msgNumEntriesLog2; ++} __packed; ++ ++/* ++ * Request descriptor. ++ * ++ * sizeof(RingReqDesc) = 128 ++ * ++ * - context: is a unique identifier of a command. It could normally be any ++ * 64bit value, however we currently store it in the serialNumber variable ++ * of struct SCSI_Command, so we have the following restrictions due to the ++ * way this field is handled in the vmkernel storage stack: ++ * * this value can't be 0, ++ * * the upper 32bit need to be 0 since serialNumber is as a u32. ++ * Currently tracked as PR 292060. ++ * - dataLen: contains the total number of bytes that need to be transferred. ++ * - dataAddr: ++ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first ++ * s/g table segment, each s/g segment is entirely contained on a single ++ * page of physical memory, ++ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of ++ * the buffer used for the DMA transfer, ++ * - flags: ++ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above, ++ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved, ++ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory, ++ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device, ++ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than ++ * 16bytes. To be specified. ++ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the ++ * completion of the i/o. For guest OSes that use lowest priority message ++ * delivery mode (such as windows), we use this "hint" to deliver the ++ * completion action to the proper vcpu. For now, we can use the vcpuId of ++ * the processor that initiated the i/o as a likely candidate for the vcpu ++ * that will be waiting for the completion.. ++ * - bus should be 0: we currently only support bus 0 for now. ++ * - unused should be zero'd. ++ */ ++ ++#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0) ++#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1) ++#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2) ++#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3) ++#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4) ++ ++struct PVSCSIRingReqDesc { ++ u64 context; ++ u64 dataAddr; ++ u64 dataLen; ++ u64 senseAddr; ++ u32 senseLen; ++ u32 flags; ++ u8 cdb[16]; ++ u8 cdbLen; ++ u8 lun[8]; ++ u8 tag; ++ u8 bus; ++ u8 target; ++ u8 vcpuHint; ++ u8 unused[59]; ++} __packed; ++ ++/* ++ * Scatter-gather list management. ++ * ++ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the ++ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g ++ * table segment. ++ * ++ * - each segment of the s/g table contain a succession of struct ++ * PVSCSISGElement. ++ * - each segment is entirely contained on a single physical page of memory. 
++ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in ++ * PVSCSISGElement.flags and in this case: ++ * * addr is the PA of the next s/g segment, ++ * * length is undefined, assumed to be 0. ++ */ ++ ++struct PVSCSISGElement { ++ u64 addr; ++ u32 length; ++ u32 flags; ++} __packed; ++ ++/* ++ * Completion descriptor. ++ * ++ * sizeof(RingCmpDesc) = 32 ++ * ++ * - context: identifier of the command. The same thing that was specified ++ * under "context" as part of struct RingReqDesc at initiation time, ++ * - dataLen: number of bytes transferred for the actual i/o operation, ++ * - senseLen: number of bytes written into the sense buffer, ++ * - hostStatus: adapter status, ++ * - scsiStatus: device status, ++ * - _pad should be zero. ++ */ ++ ++struct PVSCSIRingCmpDesc { ++ u64 context; ++ u64 dataLen; ++ u32 senseLen; ++ u16 hostStatus; ++ u16 scsiStatus; ++ u32 _pad[2]; ++} __packed; ++ ++/* ++ * Interrupt status / IRQ bits. ++ */ ++ ++#define PVSCSI_INTR_CMPL_0 (1 << 0) ++#define PVSCSI_INTR_CMPL_1 (1 << 1) ++#define PVSCSI_INTR_CMPL_MASK MASK(2) ++ ++#define PVSCSI_INTR_MSG_0 (1 << 2) ++#define PVSCSI_INTR_MSG_1 (1 << 3) ++#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2) ++ ++#define PVSCSI_INTR_ALL_SUPPORTED MASK(4) ++ ++/* ++ * Number of MSI-X vectors supported. ++ */ ++#define PVSCSI_MAX_INTRS 24 ++ ++/* ++ * Enumeration of supported MSI-X vectors ++ */ ++#define PVSCSI_VECTOR_COMPLETION 0 ++ ++/* ++ * Misc constants for the rings. ++ */ ++ ++#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES ++#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES ++#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES ++ ++#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \ ++ (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc)) ++ ++#define PVSCSI_MAX_REQ_QUEUE_DEPTH \ ++ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE) ++ ++#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1 ++#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1 ++#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2 ++#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2 ++#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2 ++ ++enum PVSCSIMemSpace { ++ PVSCSI_MEM_SPACE_COMMAND_PAGE = 0, ++ PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1, ++ PVSCSI_MEM_SPACE_MISC_PAGE = 2, ++ PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4, ++ PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6, ++ PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7, ++}; ++ ++#define PVSCSI_MEM_SPACE_NUM_PAGES \ ++ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \ ++ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \ ++ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \ ++ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \ ++ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES) ++ ++#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE) ++ ++#endif /* _VMW_PVSCSI_H_ */ +diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c +index eadc1ab..2d81457 100644 +--- a/drivers/serial/kgdboc.c ++++ b/drivers/serial/kgdboc.c +@@ -18,7 +18,7 @@ + + #define MAX_CONFIG_LEN 40 + +-static struct kgdb_io kgdboc_io_ops; ++static const struct kgdb_io kgdboc_io_ops; + + /* -1 = init not run yet, 0 = unconfigured, 1 = configured. 
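
The kgdboc change just above is one instance of a pattern this patch applies throughout the tree: structures made up of function pointers (kgdb_io, file_operations, vm_operations_struct, block_device_operations) are declared const so the compiler emits them into read-only memory, removing a classic kernel-exploitation target of overwriting an entry in an ops table. A minimal illustration of the idiom, with an invented ops type:

    #include <stdio.h>

    /* An ops table: nothing but function pointers. */
    struct demo_ops {
    	void (*greet)(void);
    };

    static void greet_impl(void) { printf("hello\n"); }

    /* const => the table lands in .rodata; writing through it is a
     * compile error here and, in the kernel, typically a
     * write-protection fault at runtime. */
    static const struct demo_ops demo_ops = {
    	.greet = greet_impl,
    };

    int main(void)
    {
    	demo_ops.greet();
    	/* demo_ops.greet = NULL;  -- would not compile:
    	 * assignment of member in read-only object. */
    	return 0;
    }
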
*/ + static int configured = -1; +@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void) + module_put(THIS_MODULE); + } + +-static struct kgdb_io kgdboc_io_ops = { ++static const struct kgdb_io kgdboc_io_ops = { + .name = "kgdboc", + .read_char = kgdboc_get_char, + .write_char = kgdboc_put_char, +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index b76f246..7f41af7 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message) + EXPORT_SYMBOL_GPL(spi_sync); + + /* portable code must never pass more than 32 bytes */ +-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) ++#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES) + + static u8 *buf; + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index b9b37ff..19dfa23 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma) + binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); + } + +-static struct vm_operations_struct binder_vm_ops = { ++static const struct vm_operations_struct binder_vm_ops = { + .open = binder_vma_open, + .close = binder_vma_close, + }; +diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c +index cda26bb..39fed3f 100644 +--- a/drivers/staging/b3dfg/b3dfg.c ++++ b/drivers/staging/b3dfg/b3dfg.c +@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma, + return VM_FAULT_NOPAGE; + } + +-static struct vm_operations_struct b3dfg_vm_ops = { ++static const struct vm_operations_struct b3dfg_vm_ops = { + .fault = b3dfg_vma_fault, + }; + +@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma) + return r; + } + +-static struct file_operations b3dfg_fops = { ++static const struct file_operations b3dfg_fops = { + .owner = THIS_MODULE, + .open = b3dfg_open, + .release = b3dfg_release, +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c +index 908f25a..c9a579b 100644 +--- a/drivers/staging/comedi/comedi_fops.c ++++ b/drivers/staging/comedi/comedi_fops.c +@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area) + mutex_unlock(&dev->mutex); + } + +-static struct vm_operations_struct comedi_vm_ops = { ++static const struct vm_operations_struct comedi_vm_ops = { + .close = comedi_unmap, + }; + +diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c +index e55a0db..577b776 100644 +--- a/drivers/staging/dream/qdsp5/adsp_driver.c ++++ b/drivers/staging/dream/qdsp5/adsp_driver.c +@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode) + static dev_t adsp_devno; + static struct class *adsp_class; + +-static struct file_operations adsp_fops = { ++static const struct file_operations adsp_fops = { + .owner = THIS_MODULE, + .open = adsp_open, + .unlocked_ioctl = adsp_ioctl, +diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c +index ad2390f..4116ee8 100644 +--- a/drivers/staging/dream/qdsp5/audio_aac.c ++++ b/drivers/staging/dream/qdsp5/audio_aac.c +@@ -1022,7 +1022,7 @@ done: + return rc; + } + +-static struct file_operations audio_aac_fops = { ++static const struct file_operations audio_aac_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, +diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c +index cd818a5..870b37b 100644 
+--- a/drivers/staging/dream/qdsp5/audio_amrnb.c ++++ b/drivers/staging/dream/qdsp5/audio_amrnb.c +@@ -833,7 +833,7 @@ done: + return rc; + } + +-static struct file_operations audio_amrnb_fops = { ++static const struct file_operations audio_amrnb_fops = { + .owner = THIS_MODULE, + .open = audamrnb_open, + .release = audamrnb_release, +diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c +index 4b43e18..cedafda 100644 +--- a/drivers/staging/dream/qdsp5/audio_evrc.c ++++ b/drivers/staging/dream/qdsp5/audio_evrc.c +@@ -805,7 +805,7 @@ dma_fail: + return rc; + } + +-static struct file_operations audio_evrc_fops = { ++static const struct file_operations audio_evrc_fops = { + .owner = THIS_MODULE, + .open = audevrc_open, + .release = audevrc_release, +diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c +index 3d950a2..9431118 100644 +--- a/drivers/staging/dream/qdsp5/audio_in.c ++++ b/drivers/staging/dream/qdsp5/audio_in.c +@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file) + return 0; + } + +-static struct file_operations audio_fops = { ++static const struct file_operations audio_fops = { + .owner = THIS_MODULE, + .open = audio_in_open, + .release = audio_in_release, +@@ -922,7 +922,7 @@ static struct file_operations audio_fops = { + .unlocked_ioctl = audio_in_ioctl, + }; + +-static struct file_operations audpre_fops = { ++static const struct file_operations audpre_fops = { + .owner = THIS_MODULE, + .open = audpre_open, + .unlocked_ioctl = audpre_ioctl, +diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c +index b95574f..286c2f4 100644 +--- a/drivers/staging/dream/qdsp5/audio_mp3.c ++++ b/drivers/staging/dream/qdsp5/audio_mp3.c +@@ -941,7 +941,7 @@ done: + return rc; + } + +-static struct file_operations audio_mp3_fops = { ++static const struct file_operations audio_mp3_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, +diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c +index d1adcf6..f8f9833 100644 +--- a/drivers/staging/dream/qdsp5/audio_out.c ++++ b/drivers/staging/dream/qdsp5/audio_out.c +@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file) + return 0; + } + +-static struct file_operations audio_fops = { ++static const struct file_operations audio_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, +@@ -819,7 +819,7 @@ static struct file_operations audio_fops = { + .unlocked_ioctl = audio_ioctl, + }; + +-static struct file_operations audpp_fops = { ++static const struct file_operations audpp_fops = { + .owner = THIS_MODULE, + .open = audpp_open, + .unlocked_ioctl = audpp_ioctl, +diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c +index f0f50e3..f6b9dbc 100644 +--- a/drivers/staging/dream/qdsp5/audio_qcelp.c ++++ b/drivers/staging/dream/qdsp5/audio_qcelp.c +@@ -816,7 +816,7 @@ err: + return rc; + } + +-static struct file_operations audio_qcelp_fops = { ++static const struct file_operations audio_qcelp_fops = { + .owner = THIS_MODULE, + .open = audqcelp_open, + .release = audqcelp_release, +diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c +index 037d7ff..5469ec3 100644 +--- a/drivers/staging/dream/qdsp5/snd.c ++++ b/drivers/staging/dream/qdsp5/snd.c +@@ -242,7 +242,7 @@ err: + return rc; + } + +-static struct 
file_operations snd_fops = { ++static const struct file_operations snd_fops = { + .owner = THIS_MODULE, + .open = snd_open, + .release = snd_release, +diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c +index d4e7d88..0ea632a 100644 +--- a/drivers/staging/dream/smd/smd_qmi.c ++++ b/drivers/staging/dream/smd/smd_qmi.c +@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp) + return 0; + } + +-static struct file_operations qmi_fops = { ++static const struct file_operations qmi_fops = { + .owner = THIS_MODULE, + .read = qmi_read, + .write = qmi_write, +diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c +index cd3910b..ff053d3 100644 +--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c ++++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c +@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd, + return rc; + } + +-static struct file_operations rpcrouter_server_fops = { ++static const struct file_operations rpcrouter_server_fops = { + .owner = THIS_MODULE, + .open = rpcrouter_open, + .release = rpcrouter_release, +@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = { + .unlocked_ioctl = rpcrouter_ioctl, + }; + +-static struct file_operations rpcrouter_router_fops = { ++static const struct file_operations rpcrouter_router_fops = { + .owner = THIS_MODULE, + .open = rpcrouter_open, + .release = rpcrouter_release, +diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c +index c24e4e0..629999b 100644 +--- a/drivers/staging/dst/dcore.c ++++ b/drivers/staging/dst/dcore.c +@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode) + return 0; + } + +-static struct block_device_operations dst_blk_ops = { ++static const struct block_device_operations dst_blk_ops = { + .open = dst_bdev_open, + .release = dst_bdev_release, + .owner = THIS_MODULE, +@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl, + n->size = ctl->size; + + atomic_set(&n->refcnt, 1); +- atomic_long_set(&n->gen, 0); ++ atomic_long_set_unchecked(&n->gen, 0); + snprintf(n->name, sizeof(n->name), "%s", ctl->name); + + err = dst_node_sysfs_init(n); +@@ -855,7 +855,7 @@ static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) + struct dst_node *n = NULL, *tmp; + unsigned int hash; + +- if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { ++ if (!capable(CAP_SYS_ADMIN)) { + err = -EPERM; + goto out; + } +diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c +index 557d372..8d84422 100644 +--- a/drivers/staging/dst/trans.c ++++ b/drivers/staging/dst/trans.c +@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio) + t->error = 0; + t->retries = 0; + atomic_set(&t->refcnt, 1); +- t->gen = atomic_long_inc_return(&n->gen); ++ t->gen = atomic_long_inc_return_unchecked(&n->gen); + + t->enc = bio_data_dir(bio); + dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen); +diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c +index 94f7752..d051514 100644 +--- a/drivers/staging/et131x/et1310_tx.c ++++ b/drivers/staging/et131x/et1310_tx.c +@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev, + struct net_device_stats *stats = &etdev->net_stats; + + if (pMpTcb->Flags & fMP_DEST_BROAD) +- atomic_inc(&etdev->Stats.brdcstxmt); ++ atomic_inc_unchecked(&etdev->Stats.brdcstxmt); + else if (pMpTcb->Flags & 
fMP_DEST_MULTI) +- atomic_inc(&etdev->Stats.multixmt); ++ atomic_inc_unchecked(&etdev->Stats.multixmt); + else +- atomic_inc(&etdev->Stats.unixmt); ++ atomic_inc_unchecked(&etdev->Stats.unixmt); + + if (pMpTcb->Packet) { + stats->tx_bytes += pMpTcb->Packet->len; +diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h +index 1dfe06f..f469b4d 100644 +--- a/drivers/staging/et131x/et131x_adapter.h ++++ b/drivers/staging/et131x/et131x_adapter.h +@@ -145,11 +145,11 @@ typedef struct _ce_stats_t { + * operations + */ + u32 unircv; /* # multicast packets received */ +- atomic_t unixmt; /* # multicast packets for Tx */ ++ atomic_unchecked_t unixmt; /* # multicast packets for Tx */ + u32 multircv; /* # multicast packets received */ +- atomic_t multixmt; /* # multicast packets for Tx */ ++ atomic_unchecked_t multixmt; /* # multicast packets for Tx */ + u32 brdcstrcv; /* # broadcast packets received */ +- atomic_t brdcstxmt; /* # broadcast packets for Tx */ ++ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */ + u32 norcvbuf; /* # Rx packets discarded */ + u32 noxmtbuf; /* # Tx packets discarded */ + +diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c +index 4bd353a..e28f455 100644 +--- a/drivers/staging/go7007/go7007-v4l2.c ++++ b/drivers/staging/go7007/go7007-v4l2.c +@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + return 0; + } + +-static struct vm_operations_struct go7007_vm_ops = { ++static const struct vm_operations_struct go7007_vm_ops = { + .open = go7007_vm_open, + .close = go7007_vm_close, + .fault = go7007_vm_fault, +diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c +index 366dc95..b974d87 100644 +--- a/drivers/staging/hv/Channel.c ++++ b/drivers/staging/hv/Channel.c +@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer, + + DPRINT_ENTER(VMBUS); + +- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle); +- atomic_inc(&gVmbusConnection.NextGpadlHandle); ++ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle); ++ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle); + + VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount); + ASSERT(msgInfo != NULL); +diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c +index b12237f..01ae28a 100644 +--- a/drivers/staging/hv/Hv.c ++++ b/drivers/staging/hv/Hv.c +@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output) + u64 outputAddress = (Output) ? 
virt_to_phys(Output) : 0; + u32 outputAddressHi = outputAddress >> 32; + u32 outputAddressLo = outputAddress & 0xFFFFFFFF; +- volatile void *hypercallPage = gHvContext.HypercallPage; ++ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage); + + DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>", + Control, Input, Output); +diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h +index d089bb1..2ebc158 100644 +--- a/drivers/staging/hv/VmbusApi.h ++++ b/drivers/staging/hv/VmbusApi.h +@@ -109,7 +109,7 @@ struct vmbus_channel_interface { + u32 *GpadlHandle); + int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle); + void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo); +-}; ++} __no_const; + + /* Base driver object */ + struct hv_driver { +diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h +index 5a37cce..6ecc88c 100644 +--- a/drivers/staging/hv/VmbusPrivate.h ++++ b/drivers/staging/hv/VmbusPrivate.h +@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE { + struct VMBUS_CONNECTION { + enum VMBUS_CONNECT_STATE ConnectState; + +- atomic_t NextGpadlHandle; ++ atomic_unchecked_t NextGpadlHandle; + + /* + * Represents channel interrupts. Each bit position represents a +diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c +index 871a202..ca50ddf 100644 +--- a/drivers/staging/hv/blkvsc_drv.c ++++ b/drivers/staging/hv/blkvsc_drv.c +@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE; + /* The one and only one */ + static struct blkvsc_driver_context g_blkvsc_drv; + +-static struct block_device_operations block_ops = { ++static const struct block_device_operations block_ops = { + .owner = THIS_MODULE, + .open = blkvsc_open, + .release = blkvsc_release, +diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c +index 6acc49a..fbc8d46 100644 +--- a/drivers/staging/hv/vmbus_drv.c ++++ b/drivers/staging/hv/vmbus_drv.c +@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj, + to_device_context(root_device_obj); + struct device_context *child_device_ctx = + to_device_context(child_device_obj); +- static atomic_t device_num = ATOMIC_INIT(0); ++ static atomic_unchecked_t device_num = ATOMIC_INIT(0); + + DPRINT_ENTER(VMBUS_DRV); + +@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj, + + /* Set the device name. Otherwise, device_register() will fail. 
*/ + dev_set_name(&child_device_ctx->device, "vmbus_0_%d", +- atomic_inc_return(&device_num)); ++ atomic_inc_return_unchecked(&device_num)); + + /* The new device belongs to this bus */ + child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */ +diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h +index d926189..17b19fd 100644 +--- a/drivers/staging/iio/ring_generic.h ++++ b/drivers/staging/iio/ring_generic.h +@@ -87,7 +87,7 @@ struct iio_ring_access_funcs { + + int (*is_enabled)(struct iio_ring_buffer *ring); + int (*enable)(struct iio_ring_buffer *ring); +-}; ++} __no_const; + + /** + * struct iio_ring_buffer - general ring buffer structure +diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c +index 1b237b7..88c624e 100644 +--- a/drivers/staging/octeon/ethernet-rx.c ++++ b/drivers/staging/octeon/ethernet-rx.c +@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused) + /* Increment RX stats for virtual ports */ + if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { + #ifdef CONFIG_64BIT +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); +- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); ++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets); ++ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes); + #else +- atomic_add(1, (atomic_t *)&priv->stats.rx_packets); +- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); ++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets); ++ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes); + #endif + } + netif_receive_skb(skb); +@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused) + dev->name); + */ + #ifdef CONFIG_64BIT +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); ++ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped); + #else +- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); ++ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped); + #endif + dev_kfree_skb_irq(skb); + } +diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c +index 492c502..d9909f1 100644 +--- a/drivers/staging/octeon/ethernet.c ++++ b/drivers/staging/octeon/ethernet.c +@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) + * since the RX tasklet also increments it. 
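
The atomic_t to atomic_unchecked_t conversions in these hunks support PaX's REFCOUNT hardening: ordinary atomic_t operations gain overflow detection so a leaked reference count cannot be wrapped around and exploited, while counters that are legitimately allowed to wrap (sequence-number generators like the vmbus device_num above, or statistics such as the octeon RX counters) are moved to the unchecked variant to avoid false positives. A rough user-space sketch of the split (the overflow handler here just aborts; the real feature reports and contains the overflow):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_t;                    /* overflow-checked */
    typedef struct { unsigned int counter; } atomic_unchecked_t; /* may wrap */

    static void atomic_inc(atomic_t *v)
    {
    	if (v->counter == INT_MAX) {
    		/* Stand-in for the REFCOUNT trap. */
    		fprintf(stderr, "refcount overflow detected\n");
    		abort();
    	}
    	v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
    	v->counter++;	/* wraparound is expected and harmless here */
    }

    int main(void)
    {
    	atomic_unchecked_t seq = { UINT_MAX };
    	atomic_inc_unchecked(&seq);	/* fine: it is only an ID generator */
    	printf("sequence counter wrapped to %u\n", seq.counter);

    	atomic_t ref = { INT_MAX };
    	atomic_inc(&ref);		/* trips the check: refcounts must not wrap */
    	return 0;
    }
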
+diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
+index 492c502..d9909f1 100644
+--- a/drivers/staging/octeon/ethernet.c
++++ b/drivers/staging/octeon/ethernet.c
+@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+ * since the RX tasklet also increments it.
+ */
+ #ifdef CONFIG_64BIT
+- atomic64_add(rx_status.dropped_packets,
+- (atomic64_t *)&priv->stats.rx_dropped);
++ atomic64_add_unchecked(rx_status.dropped_packets,
++ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+- atomic_add(rx_status.dropped_packets,
+- (atomic_t *)&priv->stats.rx_dropped);
++ atomic_add_unchecked(rx_status.dropped_packets,
++ (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+ }
+
+diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
+index a35bd5d..28fff45 100644
+--- a/drivers/staging/otus/80211core/pub_zfi.h
++++ b/drivers/staging/otus/80211core/pub_zfi.h
+@@ -531,7 +531,7 @@ struct zsCbFuncTbl
+ u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
+
+ void (*zfcbHwWatchDogNotify)(zdev_t* dev);
+-};
++} __no_const;
+
+ extern void zfZeroMemory(u8_t* va, u16_t length);
+ #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index c39a25f..696f5aa 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
+ return 0;
+ }
+
+-static struct file_operations lcd_fops = {
++static const struct file_operations lcd_fops = {
+ .write = lcd_write,
+ .open = lcd_open,
+ .release = lcd_release,
+@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
+ return 0;
+ }
+
+-static struct file_operations keypad_fops = {
++static const struct file_operations keypad_fops = {
+ .read = keypad_read, /* read */
+ .open = keypad_open, /* open */
+ .release = keypad_release, /* close */
+diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
+index 270ebcb..37e46af 100644
+--- a/drivers/staging/phison/phison.c
++++ b/drivers/staging/phison/phison.c
+@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations phison_ops = {
++static const struct ata_port_operations phison_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = phison_pre_reset,
+ };
+diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
+index 2eb8e3d..57616a7 100644
+--- a/drivers/staging/poch/poch.c
++++ b/drivers/staging/poch/poch.c
+@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
+ return 0;
+ }
+
+-static struct file_operations poch_fops = {
++static const struct file_operations poch_fops = {
+ .owner = THIS_MODULE,
+ .open = poch_open,
+ .release = poch_release,
+diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
+index 5d04bf5..d4baff2 100644
+--- a/drivers/staging/pohmelfs/config.c
++++ b/drivers/staging/pohmelfs/config.c
+@@ -531,7 +531,7 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n
+ {
+ int err;
+
+- if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
++ if (!capable(CAP_SYS_ADMIN))
+ return;
+
+ switch (msg->flags) {
+diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
+index c94de31..19402bc 100644
+--- a/drivers/staging/pohmelfs/inode.c
++++ b/drivers/staging/pohmelfs/inode.c
+@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
+ mutex_init(&psb->mcache_lock);
+ psb->mcache_root = RB_ROOT;
+ psb->mcache_timeout = msecs_to_jiffies(5000);
+- atomic_long_set(&psb->mcache_gen, 0);
++ atomic_long_set_unchecked(&psb->mcache_gen, 0);
+
+ psb->trans_max_pages = 100;
+
+@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
+ INIT_LIST_HEAD(&psb->crypto_ready_list);
+ INIT_LIST_HEAD(&psb->crypto_active_list);
+
+- atomic_set(&psb->trans_gen, 1);
++ atomic_set_unchecked(&psb->trans_gen, 1);
+ atomic_long_set(&psb->total_inodes, 0);
+
+ mutex_init(&psb->state_lock);
+diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
+index e22665c..a2a9390 100644
+--- a/drivers/staging/pohmelfs/mcache.c
++++ b/drivers/staging/pohmelfs/mcache.c
+@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
+ m->data = data;
+ m->start = start;
+ m->size = size;
+- m->gen = atomic_long_inc_return(&psb->mcache_gen);
++ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
+
+ mutex_lock(&psb->mcache_lock);
+ err = pohmelfs_mcache_insert(psb, m);
+diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
+index 623a07d..4035c19 100644
+--- a/drivers/staging/pohmelfs/netfs.h
++++ b/drivers/staging/pohmelfs/netfs.h
+@@ -570,14 +570,14 @@ struct pohmelfs_config;
+ struct pohmelfs_sb {
+ struct rb_root mcache_root;
+ struct mutex mcache_lock;
+- atomic_long_t mcache_gen;
++ atomic_long_unchecked_t mcache_gen;
+ unsigned long mcache_timeout;
+
+ unsigned int idx;
+
+ unsigned int trans_retries;
+
+- atomic_t trans_gen;
++ atomic_unchecked_t trans_gen;
+
+ unsigned int crypto_attached_size;
+ unsigned int crypto_align_size;
+diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
+index 36a2535..0591bf4 100644
+--- a/drivers/staging/pohmelfs/trans.c
++++ b/drivers/staging/pohmelfs/trans.c
+@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
+ int err;
+ struct netfs_cmd *cmd = t->iovec.iov_base;
+
+- t->gen = atomic_inc_return(&psb->trans_gen);
++ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
+
+ cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
+ t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
+diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
+index f890a16..509ece8 100644
+--- a/drivers/staging/sep/sep_driver.c
++++ b/drivers/staging/sep/sep_driver.c
+@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
+ static dev_t sep_devno;
+
+ /* the files operations structure of the driver */
+-static struct file_operations sep_file_operations = {
++static const struct file_operations sep_file_operations = {
+ .owner = THIS_MODULE,
+ .ioctl = sep_ioctl,
+ .poll = sep_poll,
+diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
+index 5e16bc3..7655b10 100644
+--- a/drivers/staging/usbip/usbip_common.h
++++ b/drivers/staging/usbip/usbip_common.h
+@@ -374,7 +374,7 @@ struct usbip_device {
+ void (*shutdown)(struct usbip_device *);
+ void (*reset)(struct usbip_device *);
+ void (*unusable)(struct usbip_device *);
+- } eh_ops;
++ } __no_const eh_ops;
+ };
+
+
+diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
+index 57f7946..d9df23d 100644
+--- a/drivers/staging/usbip/vhci.h
++++ b/drivers/staging/usbip/vhci.h
+@@ -92,7 +92,7 @@ struct vhci_hcd {
+ unsigned resuming:1;
+ unsigned long re_timeout;
+
+- atomic_t seqnum;
++ atomic_unchecked_t seqnum;
+
+ /*
+ * NOTE:
+diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
+index 20cd7db..c2693ff 100644
+--- a/drivers/staging/usbip/vhci_hcd.c
++++ b/drivers/staging/usbip/vhci_hcd.c
+@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
+ return;
+ }
+
+- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
++ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (priv->seqnum == 0xffff)
+ usbip_uinfo("seqnum max\n");
+
+@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ return -ENOMEM;
+ }
+
+- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
++ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (unlink->seqnum == 0xffff)
+ usbip_uinfo("seqnum max\n");
+
+@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
+ vdev->rhport = rhport;
+ }
+
+- atomic_set(&vhci->seqnum, 0);
++ atomic_set_unchecked(&vhci->seqnum, 0);
+ spin_lock_init(&vhci->lock);
+
+
+diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
+index 7fd76fe..673695a 100644
+--- a/drivers/staging/usbip/vhci_rx.c
++++ b/drivers/staging/usbip/vhci_rx.c
+@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ usbip_uerr("cannot find a urb of seqnum %u\n",
+ pdu->base.seqnum);
+ usbip_uinfo("max seqnum %d\n",
+- atomic_read(&the_controller->seqnum));
++ atomic_read_unchecked(&the_controller->seqnum));
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return;
+ }
+diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
+index 7891288..8e31300 100644
+--- a/drivers/staging/vme/devices/vme_user.c
++++ b/drivers/staging/vme/devices/vme_user.c
+@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
+ static int __init vme_user_probe(struct device *, int, int);
+ static int __exit vme_user_remove(struct device *, int, int);
+
+-static struct file_operations vme_user_fops = {
++static const struct file_operations vme_user_fops = {
+ .open = vme_user_open,
+ .release = vme_user_release,
+ .read = vme_user_read,
+diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
+index 58abf44..00c1fc8 100644
+--- a/drivers/staging/vt6655/hostap.c
++++ b/drivers/staging/vt6655/hostap.c
+@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ PSDevice apdev_priv;
+ struct net_device *dev = pDevice->dev;
+ int ret;
+- const struct net_device_ops apdev_netdev_ops = {
++ net_device_ops_no_const apdev_netdev_ops = {
+ .ndo_start_xmit = pDevice->tx_80211,
+ };
+
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index 0c8267a..db1f363 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ PSDevice apdev_priv;
+ struct net_device *dev = pDevice->dev;
+ int ret;
+- const struct net_device_ops apdev_netdev_ops = {
++ net_device_ops_no_const apdev_netdev_ops = {
+ .ndo_start_xmit = pDevice->tx_80211,
+ };
+
+diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
+index 925678b..da7f5ed 100644
+--- a/drivers/staging/wlan-ng/hfa384x_usb.c
++++ b/drivers/staging/wlan-ng/hfa384x_usb.c
+@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
+
+ struct usbctlx_completor {
+ int (*complete) (struct usbctlx_completor *);
+-};
++} __no_const;
+ typedef struct usbctlx_completor usbctlx_completor_t;
+
+ static int
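
/*
 * [Editorial sketch, not part of the patch.] The __no_const annotation
 * applied to usbctlx_completor and the other ops structures above pairs
 * with the constify GCC plugin support added elsewhere in this patch:
 * the plugin treats every struct made up purely of function pointers as
 * implicitly const, and __no_const opts a structure back out when it
 * genuinely has to be written at runtime, as with the net_device_ops
 * built on the stack in the vt6655/vt6656 hostap hunks. The definition
 * is along these lines:
 */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct example_ops {			/* hypothetical ops table */
	int (*complete)(void *ctx);
} __no_const;				/* left writable even when the plugin is active */
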
+diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
+index 40de151..924f268 100644
+--- a/drivers/telephony/ixj.c
++++ b/drivers/telephony/ixj.c
+@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
+ bool mContinue;
+ char *pIn, *pOut;
+
++ pax_track_stack();
++
+ if (!SCI_Prepare(j))
+ return 0;
+
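
/*
 * [Editorial sketch, not part of the patch.] pax_track_stack(), inserted
 * at the top of ixj_daa_cid_read() above and other large-frame functions
 * throughout the patch, belongs to PAX_MEMORY_STACKLEAK: it records how
 * deep the kernel stack has grown so the used portion can be erased when
 * returning to userland, preventing stale stack contents from leaking.
 * The usage pattern is simply:
 */
extern void pax_track_stack(void);	/* provided elsewhere in the patch */

static int some_big_frame_func(void)	/* hypothetical example function */
{
	char big_buffer[1024];		/* a large frame is what warrants the call */

	pax_track_stack();		/* note the new stack low-water mark first */

	big_buffer[0] = 0;
	return big_buffer[0];
}
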
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index e941367..b631f5a 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -23,6 +23,7 @@
+ #include <linux/string.h>
+ #include <linux/kobject.h>
+ #include <linux/uio_driver.h>
++#include <asm/local.h>
+
+ #define UIO_MAX_DEVICES 255
+
+@@ -30,10 +31,10 @@ struct uio_device {
+ struct module *owner;
+ struct device *dev;
+ int minor;
+- atomic_t event;
++ atomic_unchecked_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+- int vma_count;
++ local_t vma_count;
+ struct uio_info *info;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
+@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
+ return entry->show(mem, buf);
+ }
+
+-static struct sysfs_ops map_sysfs_ops = {
++static const struct sysfs_ops map_sysfs_ops = {
+ .show = map_type_show,
+ };
+
+@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
+ return entry->show(port, buf);
+ }
+
+-static struct sysfs_ops portio_sysfs_ops = {
++static const struct sysfs_ops portio_sysfs_ops = {
+ .show = portio_type_show,
+ };
+
+@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
+ struct uio_device *idev = dev_get_drvdata(dev);
+ if (idev)
+ return sprintf(buf, "%u\n",
+- (unsigned int)atomic_read(&idev->event));
++ (unsigned int)atomic_read_unchecked(&idev->event));
+ else
+ return -ENODEV;
+ }
+@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
+ {
+ struct uio_device *idev = info->uio_dev;
+
+- atomic_inc(&idev->event);
++ atomic_inc_unchecked(&idev->event);
+ wake_up_interruptible(&idev->wait);
+ kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+ }
+@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
+ }
+
+ listener->dev = idev;
+- listener->event_count = atomic_read(&idev->event);
++ listener->event_count = atomic_read_unchecked(&idev->event);
+ filep->private_data = listener;
+
+ if (idev->info->open) {
+@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
+ return -EIO;
+
+ poll_wait(filep, &idev->wait, wait);
+- if (listener->event_count != atomic_read(&idev->event))
++ if (listener->event_count != atomic_read_unchecked(&idev->event))
+ return POLLIN | POLLRDNORM;
+ return 0;
+ }
+@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- event_count = atomic_read(&idev->event);
++ event_count = atomic_read_unchecked(&idev->event);
+ if (event_count != listener->event_count) {
+ if (copy_to_user(buf, &event_count, count))
+ retval = -EFAULT;
+@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
+ static void uio_vma_open(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count++;
++ local_inc(&idev->vma_count);
+ }
+
+ static void uio_vma_close(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count--;
++ local_dec(&idev->vma_count);
+ }
+
+ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
+ idev->owner = owner;
+ idev->info = info;
+ init_waitqueue_head(&idev->wait);
+- atomic_set(&idev->event, 0);
++ atomic_set_unchecked(&idev->event, 0);
+
+ ret = uio_get_minor(idev);
+ if (ret)
+diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
+index fbea856..06efea6 100644
+--- a/drivers/usb/atm/usbatm.c
++++ b/drivers/usb/atm/usbatm.c
+@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (printk_ratelimit())
+ atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
+ __func__, vpi, vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+
+@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (length > ATM_MAX_AAL5_PDU) {
+ atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
+ __func__, length, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (sarb->len < pdu_length) {
+ atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
+ __func__, pdu_length, sarb->len, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
+ atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
+ __func__, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (printk_ratelimit())
+ atm_err(instance, "%s: no memory for skb (length: %u)!\n",
+ __func__, length);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto out;
+ }
+
+@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+
+ vcc->push(vcc, skb);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ out:
+ skb_trim(sarb, 0);
+ }
+@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
+ struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
+
+ usbatm_pop(vcc, skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ skb = skb_dequeue(&instance->sndqueue);
+ }
+@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
+ if (!left--)
+ return sprintf(page,
+ "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+- atomic_read(&atm_dev->stats.aal5.tx),
+- atomic_read(&atm_dev->stats.aal5.tx_err),
+- atomic_read(&atm_dev->stats.aal5.rx),
+- atomic_read(&atm_dev->stats.aal5.rx_err),
+- atomic_read(&atm_dev->stats.aal5.rx_drop));
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
+
+ if (!left--) {
+ if (instance->disconnected)
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 24e6205..fe5a5d4 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
+
+ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+
+-struct usb_mon_operations *mon_ops;
++const struct usb_mon_operations *mon_ops;
+
+ /*
+ * The registration is unlocked.
+@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
+ * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
+ */
+
+-int usb_mon_register (struct usb_mon_operations *ops)
++int usb_mon_register (const struct usb_mon_operations *ops)
+ {
+
+ if (mon_ops)
+diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
+index bcbe104..9cfd1c6 100644
+--- a/drivers/usb/core/hcd.h
++++ b/drivers/usb/core/hcd.h
+@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
+ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+
+ struct usb_mon_operations {
+- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
+- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
+- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
++ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
++ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
++ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
+ /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
+ };
+
+-extern struct usb_mon_operations *mon_ops;
++extern const struct usb_mon_operations *mon_ops;
+
+ static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
+ {
+@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
+ (*mon_ops->urb_complete)(bus, urb, status);
+ }
+
+-int usb_mon_register(struct usb_mon_operations *ops);
++int usb_mon_register(const struct usb_mon_operations *ops);
+ void usb_mon_deregister(void);
+
+ #else
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index 62ff5e7..530b74e 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
+ return pdata->msgdata[1];
+ }
+
+-static struct backlight_ops appledisplay_bl_data = {
++static const struct backlight_ops appledisplay_bl_data = {
+ .get_brightness = appledisplay_bl_get_brightness,
+ .update_status = appledisplay_bl_update_status,
+ };
+diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
+index e0c2db3..bd8cb66 100644
+--- a/drivers/usb/mon/mon_main.c
++++ b/drivers/usb/mon/mon_main.c
+@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
+ /*
+ * Ops
+ */
+-static struct usb_mon_operations mon_ops_0 = {
++static const struct usb_mon_operations mon_ops_0 = {
+ .urb_submit = mon_submit,
+ .urb_submit_error = mon_submit_error,
+ .urb_complete = mon_complete,
+diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
+index d6bea3e..60b250e 100644
+--- a/drivers/usb/wusbcore/wa-hc.h
++++ b/drivers/usb/wusbcore/wa-hc.h
+@@ -192,7 +192,7 @@ struct wahc {
+ struct list_head xfer_delayed_list;
+ spinlock_t xfer_list_lock;
+ struct work_struct xfer_work;
+- atomic_t xfer_id_count;
++ atomic_unchecked_t xfer_id_count;
+ };
+
+
+@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
+ INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ spin_lock_init(&wa->xfer_list_lock);
+ INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+- atomic_set(&wa->xfer_id_count, 1);
++ atomic_set_unchecked(&wa->xfer_id_count, 1);
+ }
+
+ /**
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 613a5fc..3174865 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -293,7 +293,7 @@ out:
+ */
+ static void wa_xfer_id_init(struct wa_xfer *xfer)
+ {
+- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
++ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
+ }
+
+ /*
+diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
+index aa42fce..f8a828c 100644
+--- a/drivers/uwb/wlp/messages.c
++++ b/drivers/uwb/wlp/messages.c
+@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
+ size_t len = skb->len;
+ size_t used;
+ ssize_t result;
+- struct wlp_nonce enonce, rnonce;
++ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
+ enum wlp_assc_error assc_err;
+ char enonce_buf[WLP_WSS_NONCE_STRSIZE];
+ char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
+diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
+index 0370399..6627c94 100644
+--- a/drivers/uwb/wlp/sysfs.c
++++ b/drivers/uwb/wlp/sysfs.c
+@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
+ return ret;
+ }
+
+-static
+-struct sysfs_ops wss_sysfs_ops = {
++static const struct sysfs_ops wss_sysfs_ops = {
+ .show = wlp_wss_attr_show,
+ .store = wlp_wss_attr_store,
+ };
+diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
+index 8c5e432..5ee90ea 100644
+--- a/drivers/video/atmel_lcdfb.c
++++ b/drivers/video/atmel_lcdfb.c
+@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
+ return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+ }
+
+-static struct backlight_ops atmel_lcdc_bl_ops = {
++static const struct backlight_ops atmel_lcdc_bl_ops = {
+ .update_status = atmel_bl_update_status,
+ .get_brightness = atmel_bl_get_brightness,
+ };
+diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
+index e4e4d43..66bcbcc 100644
+--- a/drivers/video/aty/aty128fb.c
++++ b/drivers/video/aty/aty128fb.c
+@@ -149,7 +149,7 @@ enum {
+ };
+
+ /* Must match above enum */
+-static const char *r128_family[] __devinitdata = {
++static const char *r128_family[] __devinitconst = {
+ "AGP",
+ "PCI",
+ "PRO AGP",
+@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops aty128_bl_data = {
++static const struct backlight_ops aty128_bl_data = {
+ .get_brightness = aty128_bl_get_brightness,
+ .update_status = aty128_bl_update_status,
+ };
+diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
+index 913b4a4..9295a38 100644
+--- a/drivers/video/aty/atyfb_base.c
++++ b/drivers/video/aty/atyfb_base.c
+@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops aty_bl_data = {
++static const struct backlight_ops aty_bl_data = {
+ .get_brightness = aty_bl_get_brightness,
+ .update_status = aty_bl_update_status,
+ };
+diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
+index 1a056ad..221bd6a 100644
+--- a/drivers/video/aty/radeon_backlight.c
++++ b/drivers/video/aty/radeon_backlight.c
+@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops radeon_bl_data = {
++static const struct backlight_ops radeon_bl_data = {
+ .get_brightness = radeon_bl_get_brightness,
+ .update_status = radeon_bl_update_status,
+ };
+diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
+index ad05da5..3cb2cb9 100644
+--- a/drivers/video/backlight/adp5520_bl.c
++++ b/drivers/video/backlight/adp5520_bl.c
+@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
+ return error ? data->current_brightness : reg_val;
+ }
+
+-static struct backlight_ops adp5520_bl_ops = {
++static const struct backlight_ops adp5520_bl_ops = {
+ .update_status = adp5520_bl_update_status,
+ .get_brightness = adp5520_bl_get_brightness,
+ };
+diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
+index 2c3bdfc..d769b0b 100644
+--- a/drivers/video/backlight/adx_bl.c
++++ b/drivers/video/backlight/adx_bl.c
+@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
+ return 1;
+ }
+
+-static struct backlight_ops adx_backlight_ops = {
++static const struct backlight_ops adx_backlight_ops = {
+ .options = 0,
+ .update_status = adx_backlight_update_status,
+ .get_brightness = adx_backlight_get_brightness,
+diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
+index 505c082..6b6b3cc 100644
+--- a/drivers/video/backlight/atmel-pwm-bl.c
++++ b/drivers/video/backlight/atmel-pwm-bl.c
+@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
+ return pwm_channel_enable(&pwmbl->pwmc);
+ }
+
+-static struct backlight_ops atmel_pwm_bl_ops = {
++static const struct backlight_ops atmel_pwm_bl_ops = {
+ .get_brightness = atmel_pwm_bl_get_intensity,
+ .update_status = atmel_pwm_bl_set_intensity,
+ };
+diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
+index 5e20e6e..89025e6 100644
+--- a/drivers/video/backlight/backlight.c
++++ b/drivers/video/backlight/backlight.c
+@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
+ * ERR_PTR() or a pointer to the newly allocated device.
+ */
+ struct backlight_device *backlight_device_register(const char *name,
+- struct device *parent, void *devdata, struct backlight_ops *ops)
++ struct device *parent, void *devdata, const struct backlight_ops *ops)
+ {
+ struct backlight_device *new_bd;
+ int rc;
+diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
+index 9677494..b4bcf80 100644
+--- a/drivers/video/backlight/corgi_lcd.c
++++ b/drivers/video/backlight/corgi_lcd.c
+@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
+ }
+ EXPORT_SYMBOL(corgi_lcd_limit_intensity);
+
+-static struct backlight_ops corgi_bl_ops = {
++static const struct backlight_ops corgi_bl_ops = {
+ .get_brightness = corgi_bl_get_intensity,
+ .update_status = corgi_bl_update_status,
+ };
+diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
+index b9fe62b..2914bf1 100644
+--- a/drivers/video/backlight/cr_bllcd.c
++++ b/drivers/video/backlight/cr_bllcd.c
+@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
+ return intensity;
+ }
+
+-static struct backlight_ops cr_backlight_ops = {
++static const struct backlight_ops cr_backlight_ops = {
+ .get_brightness = cr_backlight_get_intensity,
+ .update_status = cr_backlight_set_intensity,
+ };
+diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
+index 701a108..feacfd5 100644
+--- a/drivers/video/backlight/da903x_bl.c
++++ b/drivers/video/backlight/da903x_bl.c
+@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
+ return data->current_brightness;
+ }
+
+-static struct backlight_ops da903x_backlight_ops = {
++static const struct backlight_ops da903x_backlight_ops = {
+ .update_status = da903x_backlight_update_status,
+ .get_brightness = da903x_backlight_get_brightness,
+ };
+diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
+index 6d27f62..e6d348e 100644
+--- a/drivers/video/backlight/generic_bl.c
++++ b/drivers/video/backlight/generic_bl.c
+@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
+ }
+ EXPORT_SYMBOL(corgibl_limit_intensity);
+
+-static struct backlight_ops genericbl_ops = {
++static const struct backlight_ops genericbl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = genericbl_get_intensity,
+ .update_status = genericbl_send_intensity,
+diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
+index 7fb4eef..f7cc528 100644
+--- a/drivers/video/backlight/hp680_bl.c
++++ b/drivers/video/backlight/hp680_bl.c
+@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
+ return current_intensity;
+ }
+
+-static struct backlight_ops hp680bl_ops = {
++static const struct backlight_ops hp680bl_ops = {
+ .get_brightness = hp680bl_get_intensity,
+ .update_status = hp680bl_set_intensity,
+ };
+diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
+index 7aed256..db9071f 100644
+--- a/drivers/video/backlight/jornada720_bl.c
++++ b/drivers/video/backlight/jornada720_bl.c
+@@ -93,7 +93,7 @@ out:
+ return ret;
+ }
+
+-static struct backlight_ops jornada_bl_ops = {
++static const struct backlight_ops jornada_bl_ops = {
+ .get_brightness = jornada_bl_get_brightness,
+ .update_status = jornada_bl_update_status,
+ .options = BL_CORE_SUSPENDRESUME,
+diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
+index a38fda1..939e7b8 100644
+--- a/drivers/video/backlight/kb3886_bl.c
++++ b/drivers/video/backlight/kb3886_bl.c
+@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
+ return kb3886bl_intensity;
+ }
+
+-static struct backlight_ops kb3886bl_ops = {
++static const struct backlight_ops kb3886bl_ops = {
+ .get_brightness = kb3886bl_get_intensity,
+ .update_status = kb3886bl_send_intensity,
+ };
+diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
+index 6b488b8..00a9591 100644
+--- a/drivers/video/backlight/locomolcd.c
++++ b/drivers/video/backlight/locomolcd.c
+@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
+ return current_intensity;
+ }
+
+-static struct backlight_ops locomobl_data = {
++static const struct backlight_ops locomobl_data = {
+ .get_brightness = locomolcd_get_intensity,
+ .update_status = locomolcd_set_intensity,
+ };
+diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
+index 99bdfa8..3dac448 100644
+--- a/drivers/video/backlight/mbp_nvidia_bl.c
++++ b/drivers/video/backlight/mbp_nvidia_bl.c
+@@ -33,7 +33,7 @@ struct dmi_match_data {
+ unsigned long iostart;
+ unsigned long iolen;
+ /* Backlight operations structure. */
+- struct backlight_ops backlight_ops;
++ const struct backlight_ops backlight_ops;
+ };
+
+ /* Module parameters. */
+diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
+index cbad67e8..3cf900e 100644
+--- a/drivers/video/backlight/omap1_bl.c
++++ b/drivers/video/backlight/omap1_bl.c
+@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
+ return bl->current_intensity;
+ }
+
+-static struct backlight_ops omapbl_ops = {
++static const struct backlight_ops omapbl_ops = {
+ .get_brightness = omapbl_get_intensity,
+ .update_status = omapbl_update_status,
+ };
+diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
+index 9edaf24..075786e 100644
+--- a/drivers/video/backlight/progear_bl.c
++++ b/drivers/video/backlight/progear_bl.c
+@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
+ return intensity - HW_LEVEL_MIN;
+ }
+
+-static struct backlight_ops progearbl_ops = {
++static const struct backlight_ops progearbl_ops = {
+ .get_brightness = progearbl_get_intensity,
+ .update_status = progearbl_set_intensity,
+ };
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index 8871662..df9e0b3 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
+ return bl->props.brightness;
+ }
+
+-static struct backlight_ops pwm_backlight_ops = {
++static const struct backlight_ops pwm_backlight_ops = {
+ .update_status = pwm_backlight_update_status,
+ .get_brightness = pwm_backlight_get_brightness,
+ };
+diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
+index 43edbad..e14ce4d 100644
+--- a/drivers/video/backlight/tosa_bl.c
++++ b/drivers/video/backlight/tosa_bl.c
+@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
+ return props->brightness;
+ }
+
+-static struct backlight_ops bl_ops = {
++static const struct backlight_ops bl_ops = {
+ .get_brightness = tosa_bl_get_brightness,
+ .update_status = tosa_bl_update_status,
+ };
+diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
+index 467bdb7..e32add3 100644
+--- a/drivers/video/backlight/wm831x_bl.c
++++ b/drivers/video/backlight/wm831x_bl.c
+@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
+ return data->current_brightness;
+ }
+
+-static struct backlight_ops wm831x_backlight_ops = {
++static const struct backlight_ops wm831x_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = wm831x_backlight_update_status,
+ .get_brightness = wm831x_backlight_get_brightness,
+diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
+index e49ae5e..db4e6f7 100644
+--- a/drivers/video/bf54x-lq043fb.c
++++ b/drivers/video/bf54x-lq043fb.c
+@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
+ return 0;
+ }
+
+-static struct backlight_ops bfin_lq043fb_bl_ops = {
++static const struct backlight_ops bfin_lq043fb_bl_ops = {
+ .get_brightness = bl_get_brightness,
+ };
+
+diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
+index 2c72a7c..d523e52 100644
+--- a/drivers/video/bfin-t350mcqb-fb.c
++++ b/drivers/video/bfin-t350mcqb-fb.c
+@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
+ return 0;
+ }
+
+-static struct backlight_ops bfin_lq043fb_bl_ops = {
++static const struct backlight_ops bfin_lq043fb_bl_ops = {
+ .get_brightness = bl_get_brightness,
+ };
+
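
/*
 * [Editorial sketch, not part of the patch.] The long run of backlight
 * conversions above all make the same change: an ops table that is only
 * ever read gets the const qualifier, so it is placed in .rodata and a
 * kernel write primitive can no longer redirect its function pointers.
 * In miniature:
 */
struct demo_ops {			/* hypothetical ops table */
	int (*get)(void);
	void (*set)(int);
};

static int demo_get(void) { return 0; }
static void demo_set(int v) { (void)v; }

static const struct demo_ops demo_bl_ops = {	/* ends up in .rodata */
	.get = demo_get,
	.set = demo_set,
};
/* demo_bl_ops.set = NULL; would now be rejected at compile time */
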
+diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
+index f53b9f1..958bf4e 100644
+--- a/drivers/video/fbcmap.c
++++ b/drivers/video/fbcmap.c
+@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
+ rc = -ENODEV;
+ goto out;
+ }
+- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
+- !info->fbops->fb_setcmap)) {
++ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
+ rc = -EINVAL;
+ goto out1;
+ }
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 99bbd28..ad3829e 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ image->dx += image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_UD) {
+- for (x = 0; x < num && image->dx >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx -= image->width + 8;
+ }
+@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ image->dy += image->height + 8;
+ }
+ } else if (rotate == FB_ROTATE_CCW) {
+- for (x = 0; x < num && image->dy >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy -= image->height + 8;
+ }
+@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ int flags = info->flags;
+ int ret = 0;
+
++ pax_track_stack();
++
+ if (var->activate & FB_ACTIVATE_INV_MODE) {
+ struct fb_videomode mode1, mode2;
+
+@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ void __user *argp = (void __user *)arg;
+ long ret = 0;
+
++ pax_track_stack();
++
+ switch (cmd) {
+ case FBIOGET_VSCREENINFO:
+ if (!lock_fb_info(info))
+@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ return -EFAULT;
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ return -EINVAL;
+- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
++ if (con2fb.framebuffer >= FB_MAX)
+ return -EINVAL;
+ if (!registered_fb[con2fb.framebuffer])
+ request_module("fb%d", con2fb.framebuffer);
+diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
+index f20eff8..3e4f622 100644
+--- a/drivers/video/geode/gx1fb_core.c
++++ b/drivers/video/geode/gx1fb_core.c
+@@ -30,7 +30,7 @@ static int crt_option = 1;
+ static char panel_option[32] = "";
+
+ /* Modes relevant to the GX1 (taken from modedb.c) */
+-static const struct fb_videomode __initdata gx1_modedb[] = {
++static const struct fb_videomode __initconst gx1_modedb[] = {
+ /* 640x480-60 VESA */
+ { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
+index 896e53d..4d87d0b 100644
+--- a/drivers/video/gxt4500.c
++++ b/drivers/video/gxt4500.c
+@@ -156,7 +156,7 @@ struct gxt4500_par {
+ static char *mode_option;
+
+ /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
+-static const struct fb_videomode defaultmode __devinitdata = {
++static const struct fb_videomode defaultmode __devinitconst = {
+ .refresh = 60,
+ .xres = 1280,
+ .yres = 1024,
+@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
+ return 0;
+ }
+
+-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
++static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
+ .id = "IBM GXT4500P",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_PSEUDOCOLOR,
+diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
+index f5bedee..28c6028 100644
+--- a/drivers/video/i810/i810_accel.c
++++ b/drivers/video/i810/i810_accel.c
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
+ }
+ }
+ printk("ringbuffer lockup!!!\n");
++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+ i810_report_error(mmio);
+ par->dev_flags |= LOCKUP;
+ info->pixmap.scan_align = 1;
+diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
+index 5743ea2..457f82c 100644
+--- a/drivers/video/i810/i810_main.c
++++ b/drivers/video/i810/i810_main.c
+@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
+ static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
+
+ /* PCI */
+-static const char *i810_pci_list[] __devinitdata = {
++static const char *i810_pci_list[] __devinitconst = {
+ "Intel(R) 810 Framebuffer Device" ,
+ "Intel(R) 810-DC100 Framebuffer Device" ,
+ "Intel(R) 810E Framebuffer Device" ,
+diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
+index 3c14e43..eafa544 100644
+--- a/drivers/video/logo/logo_linux_clut224.ppm
++++ b/drivers/video/logo/logo_linux_clut224.ppm
+@@ -1,1604 +1,1123 @@
+ P3
+-# Standard 224-color Linux logo
+ 80 80
+ 255
+ [raw PPM pixel rows follow: the stock logo's 1,604-line color raster replaced by a 1,123-line image]
+-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 206 206 206 2 2 6 +- 2 2 6 2 2 6 2 2 6 38 38 38 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 6 6 6 86 86 86 46 46 46 14 14 14 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 18 18 18 46 46 46 86 86 86 18 18 18 +- 2 2 6 34 34 34 10 10 10 6 6 6 +-210 210 210 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 221 221 221 6 6 6 +- 2 2 6 2 2 6 6 6 6 30 30 30 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 82 82 82 54 54 54 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 26 26 26 66 66 66 62 62 62 2 2 6 +- 2 2 6 38 38 38 10 10 10 26 26 26 +-238 238 238 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 238 238 238 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 6 6 6 +- 2 2 6 2 2 6 10 10 10 30 30 30 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 58 58 58 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 38 38 38 78 78 78 6 6 6 2 2 6 +- 2 2 6 46 46 46 14 14 14 42 42 42 +-246 246 246 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 10 10 10 +- 2 2 6 2 2 6 22 22 22 14 14 14 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 62 62 62 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 50 50 50 74 74 74 2 2 6 2 2 6 +- 14 14 14 70 70 70 34 34 34 62 62 62 +-250 250 250 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 14 14 14 +- 2 2 6 2 2 6 30 30 30 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 62 62 62 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 54 54 54 62 
62 62 2 2 6 2 2 6 +- 2 2 6 30 30 30 46 46 46 70 70 70 +-250 250 250 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 226 226 226 10 10 10 +- 2 2 6 6 6 6 30 30 30 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 58 58 58 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 22 22 22 +- 58 58 58 62 62 62 2 2 6 2 2 6 +- 2 2 6 2 2 6 30 30 30 78 78 78 +-250 250 250 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 206 206 206 2 2 6 +- 22 22 22 34 34 34 18 14 6 22 22 22 +- 26 26 26 18 18 18 6 6 6 2 2 6 +- 2 2 6 82 82 82 54 54 54 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 26 26 26 +- 62 62 62 106 106 106 74 54 14 185 133 11 +-210 162 10 121 92 8 6 6 6 62 62 62 +-238 238 238 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 158 158 158 18 18 18 +- 14 14 14 2 2 6 2 2 6 2 2 6 +- 6 6 6 18 18 18 66 66 66 38 38 38 +- 6 6 6 94 94 94 50 50 50 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 10 10 10 10 10 10 18 18 18 38 38 38 +- 78 78 78 142 134 106 216 158 10 242 186 14 +-246 190 14 246 190 14 156 118 10 10 10 10 +- 90 90 90 238 238 238 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 250 250 250 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 246 230 190 +-238 204 91 238 204 91 181 142 44 37 26 9 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 38 38 38 46 46 46 +- 26 26 26 106 106 106 54 54 54 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 14 14 14 22 22 22 +- 30 30 30 38 38 38 50 50 50 70 70 70 +-106 106 106 190 142 34 226 170 11 242 186 14 +-246 190 14 246 190 14 246 190 14 154 114 10 +- 6 6 6 74 74 74 226 226 226 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 250 250 250 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 228 184 62 +-241 196 14 241 208 19 232 195 16 38 30 10 +- 2 2 6 2 
2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 30 30 30 26 26 26 +-203 166 17 154 142 90 66 66 66 26 26 26 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 18 18 18 38 38 38 58 58 58 +- 78 78 78 86 86 86 101 101 101 123 123 123 +-175 146 61 210 150 10 234 174 13 246 186 14 +-246 190 14 246 190 14 246 190 14 238 190 10 +-102 78 10 2 2 6 46 46 46 198 198 198 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 224 178 62 +-242 186 14 241 196 14 210 166 10 22 18 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 6 6 6 121 92 8 +-238 202 15 232 195 16 82 82 82 34 34 34 +- 10 10 10 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 14 14 14 38 38 38 70 70 70 154 122 46 +-190 142 34 200 144 11 197 138 11 197 138 11 +-213 154 11 226 170 11 242 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-225 175 15 46 32 6 2 2 6 22 22 22 +-158 158 158 250 250 250 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 250 250 250 242 242 242 224 178 62 +-239 182 13 236 186 11 213 154 11 46 32 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 61 42 6 225 175 15 +-238 190 10 236 186 11 112 100 78 42 42 42 +- 14 14 14 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 22 22 22 54 54 54 154 122 46 213 154 11 +-226 170 11 230 174 11 226 170 11 226 170 11 +-236 178 12 242 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-241 196 14 184 144 12 10 10 10 2 2 6 +- 6 6 6 116 116 116 242 242 242 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 231 231 231 198 198 198 214 170 54 +-236 178 12 236 178 12 210 150 10 137 92 6 +- 18 14 6 2 2 6 2 2 6 2 2 6 +- 6 6 6 70 47 6 200 144 11 236 178 12 +-239 182 13 239 182 13 124 112 88 58 58 58 +- 22 22 22 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 70 70 70 180 133 36 226 170 11 +-239 182 13 242 186 14 242 186 14 246 186 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 232 195 16 98 70 6 2 2 6 +- 2 2 6 2 2 6 66 66 66 221 221 221 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 206 206 206 198 198 198 214 166 58 +-230 174 11 230 174 11 216 158 10 192 133 9 +-163 110 8 116 81 8 102 78 10 116 81 8 +-167 114 7 197 138 11 226 170 11 239 182 13 +-242 186 14 242 186 14 162 146 94 78 78 78 +- 34 34 34 14 14 14 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 30 30 30 78 78 78 190 142 34 226 170 11 +-239 182 13 246 190 14 246 190 14 246 190 14 +-246 190 14 246 
190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 241 196 14 203 166 17 22 18 6 +- 2 2 6 2 2 6 2 2 6 38 38 38 +-218 218 218 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-250 250 250 206 206 206 198 198 198 202 162 69 +-226 170 11 236 178 12 224 166 10 210 150 10 +-200 144 11 197 138 11 192 133 9 197 138 11 +-210 150 10 226 170 11 242 186 14 246 190 14 +-246 190 14 246 186 14 225 175 15 124 112 88 +- 62 62 62 30 30 30 14 14 14 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 78 78 78 174 135 50 224 166 10 +-239 182 13 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 241 196 14 139 102 15 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 78 78 78 250 250 250 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-250 250 250 214 214 214 198 198 198 190 150 46 +-219 162 10 236 178 12 234 174 13 224 166 10 +-216 158 10 213 154 11 213 154 11 216 158 10 +-226 170 11 239 182 13 246 190 14 246 190 14 +-246 190 14 246 190 14 242 186 14 206 162 42 +-101 101 101 58 58 58 30 30 30 14 14 14 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 74 74 74 174 135 50 216 158 10 +-236 178 12 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 241 196 14 226 184 13 +- 61 42 6 2 2 6 2 2 6 2 2 6 +- 22 22 22 238 238 238 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 226 226 226 187 187 187 180 133 36 +-216 158 10 236 178 12 239 182 13 236 178 12 +-230 174 11 226 170 11 226 170 11 230 174 11 +-236 178 12 242 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 186 14 239 182 13 +-206 162 42 106 106 106 66 66 66 34 34 34 +- 14 14 14 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 26 26 26 70 70 70 163 133 67 213 154 11 +-236 178 12 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 241 196 14 +-190 146 13 18 14 6 2 2 6 2 2 6 +- 46 46 46 246 246 246 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 221 221 221 86 86 86 156 107 11 +-216 158 10 236 178 12 242 186 14 246 186 14 +-242 186 14 239 182 13 239 182 13 242 186 14 +-242 186 14 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-242 186 14 225 175 15 142 122 72 66 66 66 +- 30 30 30 10 10 10 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 26 26 26 70 70 70 163 133 67 210 150 10 +-236 178 12 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-232 195 16 121 92 8 34 34 34 106 106 106 +-221 221 221 253 253 253 253 253 
253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-242 242 242 82 82 82 18 14 6 163 110 8 +-216 158 10 236 178 12 242 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 242 186 14 163 133 67 +- 46 46 46 18 18 18 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 78 78 78 163 133 67 210 150 10 +-236 178 12 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-241 196 14 215 174 15 190 178 144 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 218 218 218 +- 58 58 58 2 2 6 22 18 6 167 114 7 +-216 158 10 236 178 12 246 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 186 14 242 186 14 190 150 46 +- 54 54 54 22 22 22 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 14 14 14 +- 38 38 38 86 86 86 180 133 36 213 154 11 +-236 178 12 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 232 195 16 190 146 13 214 214 214 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 250 250 250 170 170 170 26 26 26 +- 2 2 6 2 2 6 37 26 9 163 110 8 +-219 162 10 239 182 13 246 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 186 14 236 178 12 224 166 10 142 122 72 +- 46 46 46 18 18 18 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 50 50 50 109 106 95 192 133 9 224 166 10 +-242 186 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-242 186 14 226 184 13 210 162 10 142 110 46 +-226 226 226 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-198 198 198 66 66 66 2 2 6 2 2 6 +- 2 2 6 2 2 6 50 34 6 156 107 11 +-219 162 10 239 182 13 246 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 242 186 14 +-234 174 13 213 154 11 154 122 46 66 66 66 +- 30 30 30 10 10 10 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 22 22 22 +- 58 58 58 154 121 60 206 145 10 234 174 13 +-242 186 14 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 186 14 236 178 12 210 162 10 163 110 8 +- 61 42 6 138 138 138 218 218 218 250 250 250 +-253 253 253 253 253 253 253 253 253 250 250 250 +-242 242 242 210 210 210 144 144 144 66 66 66 +- 6 6 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 
2 2 6 61 42 6 163 110 8 +-216 158 10 236 178 12 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 239 182 13 230 174 11 216 158 10 +-190 142 34 124 112 88 70 70 70 38 38 38 +- 18 18 18 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 22 22 22 +- 62 62 62 168 124 44 206 145 10 224 166 10 +-236 178 12 239 182 13 242 186 14 242 186 14 +-246 186 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 236 178 12 216 158 10 175 118 6 +- 80 54 7 2 2 6 6 6 6 30 30 30 +- 54 54 54 62 62 62 50 50 50 38 38 38 +- 14 14 14 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 80 54 7 167 114 7 +-213 154 11 236 178 12 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 242 186 14 239 182 13 239 182 13 +-230 174 11 210 150 10 174 135 50 124 112 88 +- 82 82 82 54 54 54 34 34 34 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 50 50 50 158 118 36 192 133 9 200 144 11 +-216 158 10 219 162 10 224 166 10 226 170 11 +-230 174 11 236 178 12 239 182 13 239 182 13 +-242 186 14 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 186 14 230 174 11 210 150 10 163 110 8 +-104 69 6 10 10 10 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 91 60 6 167 114 7 +-206 145 10 230 174 11 242 186 14 246 190 14 +-246 190 14 246 190 14 246 186 14 242 186 14 +-239 182 13 230 174 11 224 166 10 213 154 11 +-180 133 36 124 112 88 86 86 86 58 58 58 +- 38 38 38 22 22 22 10 10 10 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 14 14 14 +- 34 34 34 70 70 70 138 110 50 158 118 36 +-167 114 7 180 123 7 192 133 9 197 138 11 +-200 144 11 206 145 10 213 154 11 219 162 10 +-224 166 10 230 174 11 239 182 13 242 186 14 +-246 186 14 246 186 14 246 186 14 246 186 14 +-239 182 13 216 158 10 185 133 11 152 99 6 +-104 69 6 18 14 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 80 54 7 152 99 6 +-192 133 9 219 162 10 236 178 12 239 182 13 +-246 186 14 242 186 14 239 182 13 236 178 12 +-224 166 10 206 145 10 192 133 9 154 121 60 +- 94 94 94 62 62 62 42 42 42 22 22 22 +- 14 14 14 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 18 18 18 34 34 34 58 58 58 78 78 78 +-101 98 89 124 112 88 142 110 46 156 107 11 +-163 110 8 167 114 7 175 118 6 180 123 7 +-185 133 11 197 138 11 210 150 10 219 162 10 +-226 170 11 236 178 12 236 178 12 234 174 13 +-219 162 10 197 138 11 163 110 8 130 83 6 +- 91 60 6 10 10 10 2 2 6 2 2 6 +- 18 18 18 38 38 38 38 38 38 38 38 38 +- 38 38 38 38 38 38 38 38 38 38 38 38 +- 38 38 38 38 38 38 26 26 26 2 2 6 +- 2 2 6 6 6 6 70 47 6 137 92 6 +-175 118 6 200 144 11 219 162 10 230 174 11 +-234 174 13 230 174 11 219 162 10 210 150 10 +-192 133 9 163 110 8 124 112 88 82 82 82 +- 50 50 50 30 30 30 14 14 14 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 14 14 14 22 22 22 34 34 34 +- 42 42 42 58 58 58 74 74 74 86 86 86 +-101 98 89 122 102 70 130 98 46 121 87 25 +-137 92 6 152 99 6 163 110 8 180 123 7 +-185 133 11 197 138 11 206 145 10 200 144 11 
+-180 123 7 156 107 11 130 83 6 104 69 6 +- 50 34 6 54 54 54 110 110 110 101 98 89 +- 86 86 86 82 82 82 78 78 78 78 78 78 +- 78 78 78 78 78 78 78 78 78 78 78 78 +- 78 78 78 82 82 82 86 86 86 94 94 94 +-106 106 106 101 101 101 86 66 34 124 80 6 +-156 107 11 180 123 7 192 133 9 200 144 11 +-206 145 10 200 144 11 192 133 9 175 118 6 +-139 102 15 109 106 95 70 70 70 42 42 42 +- 22 22 22 10 10 10 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 10 10 10 +- 14 14 14 22 22 22 30 30 30 38 38 38 +- 50 50 50 62 62 62 74 74 74 90 90 90 +-101 98 89 112 100 78 121 87 25 124 80 6 +-137 92 6 152 99 6 152 99 6 152 99 6 +-138 86 6 124 80 6 98 70 6 86 66 30 +-101 98 89 82 82 82 58 58 58 46 46 46 +- 38 38 38 34 34 34 34 34 34 34 34 34 +- 34 34 34 34 34 34 34 34 34 34 34 34 +- 34 34 34 34 34 34 38 38 38 42 42 42 +- 54 54 54 82 82 82 94 86 76 91 60 6 +-134 86 6 156 107 11 167 114 7 175 118 6 +-175 118 6 167 114 7 152 99 6 121 87 25 +-101 98 89 62 62 62 34 34 34 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 6 6 6 10 10 10 +- 18 18 18 22 22 22 30 30 30 42 42 42 +- 50 50 50 66 66 66 86 86 86 101 98 89 +-106 86 58 98 70 6 104 69 6 104 69 6 +-104 69 6 91 60 6 82 62 34 90 90 90 +- 62 62 62 38 38 38 22 22 22 14 14 14 +- 10 10 10 10 10 10 10 10 10 10 10 10 +- 10 10 10 10 10 10 6 6 6 10 10 10 +- 10 10 10 10 10 10 10 10 10 14 14 14 +- 22 22 22 42 42 42 70 70 70 89 81 66 +- 80 54 7 104 69 6 124 80 6 137 92 6 +-134 86 6 116 81 8 100 82 52 86 86 86 +- 58 58 58 30 30 30 14 14 14 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 10 10 10 14 14 14 +- 18 18 18 26 26 26 38 38 38 54 54 54 +- 70 70 70 86 86 86 94 86 76 89 81 66 +- 89 81 66 86 86 86 74 74 74 50 50 50 +- 30 30 30 14 14 14 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 18 18 18 34 34 34 58 58 58 +- 82 82 82 89 81 66 89 81 66 89 81 66 +- 94 86 66 94 86 76 74 74 74 50 50 50 +- 26 26 26 14 14 14 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 6 6 6 14 14 14 18 18 18 +- 30 30 30 38 38 38 46 46 46 54 54 54 +- 50 50 50 42 42 42 30 30 30 18 18 18 +- 10 10 10 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 14 14 14 26 26 26 +- 38 38 38 50 50 50 58 58 58 58 58 58 +- 54 54 54 42 42 42 30 30 30 18 18 18 +- 10 10 10 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 6 6 6 10 10 10 14 14 14 18 18 18 +- 18 18 18 14 14 14 10 10 10 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 14 14 14 18 18 18 22 22 22 22 22 22 +- 18 18 18 14 14 14 10 10 10 6 6 6 +- 0 0 0 
0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0 ++0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 
++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28 ++37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6 ++2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 ++4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0 ++1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137 ++153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0 ++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125 ++60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4 ++4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35 ++2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0 ++4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167 ++165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63 ++1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 ++3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167 ++163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5 ++0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159 ++37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 ++37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158 ++156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166 ++125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4 ++5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 ++0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158 ++174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1 ++0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196 ++64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 ++5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134 ++156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157 ++156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167 ++174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0 ++1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0 ++13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153 ++174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2 ++22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193 ++90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3 ++0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174 ++174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155 ++156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153 ++163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17 ++4 
0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 ++5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63 ++131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174 ++190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103 ++90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196 ++31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0 ++4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163 ++155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165 ++167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155 ++153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131 ++41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4 ++1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174 ++177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137 ++125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209 ++136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122 ++7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37 ++125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155 ++156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155 ++137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156 ++156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174 ++167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0 ++0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174 ++166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6 ++6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196 ++90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14 ++1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153 ++167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156 ++157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68 ++26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166 ++158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158 ++165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17 ++60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165 ++137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21 ++52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146 ++13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0 ++4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 ++0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166 ++158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158 ++167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0 ++4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158 ++174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156 ++155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125 ++137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125 ++16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188 ++136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14 ++2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5 ++4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0 ++37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157 ++157 156 157 155 154 155 153 152 153 
157 156 157 167 166 167 174 174 174 ++153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0 ++4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37 ++125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154 ++156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163 ++174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0 ++4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211 ++136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2 ++1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4 ++2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0 ++0 0 0 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 ++4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127 ++158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156 ++153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125 ++37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4 ++4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0 ++4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165 ++154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174 ++174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3 ++32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193 ++28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5 ++50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1 ++0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81 ++2 0 0 0 0 0 ++4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2 ++0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174 ++174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153 ++165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6 ++4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3 ++4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174 ++174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158 ++60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148 ++136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13 ++22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132 ++136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0 ++26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165 ++37 38 37 0 0 0 ++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0 ++13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165 ++153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174 ++177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0 ++4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5 ++5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5 ++6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84 ++166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27 ++4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220 ++146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103 ++71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196 ++90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28 ++125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174 ++85 115 134 4 0 0 ++4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55 ++125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153 ++155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154 ++125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 ++0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4 ++5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6 ++37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0 ++4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209 ++90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103 ++2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93 ++13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137 ++166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174 ++60 73 81 4 0 0 ++4 
4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174 ++174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155 ++156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37 ++4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5 ++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3 ++10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0 ++4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55 ++80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209 ++28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13 ++50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1 ++1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174 ++167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125 ++16 19 21 4 0 0 ++4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174 ++158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 ++167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0 ++4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4 ++4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86 ++80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1 ++4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5 ++3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209 ++146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 ++68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193 ++136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0 ++24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165 ++163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28 ++4 0 0 4 3 3 ++3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158 ++156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174 ++155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 ++2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196 ++136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0 ++0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0 ++0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211 ++136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193 ++28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193 ++22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81 ++137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153 ++60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0 ++3 2 2 4 4 4 ++3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158 ++157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125 ++37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4 ++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0 ++0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196 ++101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126 ++14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ++22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209 ++136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13 ++17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15 ++2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163 ++166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63 ++13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2 ++4 4 4 4 4 4 ++1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153 ++163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6 ++4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4 ++4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18 ++40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209 ++101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126 ++136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 ++136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103 ++136 
185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5 ++3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167 ++174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0 ++4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131 ++155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0 ++4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5 ++4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159 ++101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 ++136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211 ++136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196 ++136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220 ++90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17 ++85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174 ++167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3 ++6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5 ++5 5 5 5 5 5 ++1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125 ++131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0 ++6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1 ++0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196 ++101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209 ++136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209 ++101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141 ++7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154 ++174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125 ++24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 ++5 5 5 4 4 4 ++4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131 ++131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3 ++6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0 ++13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 ++101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 ++136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196 ++136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8 ++2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174 ++174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0 ++4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137 ++137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2 ++4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72 ++64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 ++101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7 ++37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166 ++167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0 ++3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137 ++153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2 ++4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 
90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 ++90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209 ++101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196 ++101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193 ++35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84 ++154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157 ++60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137 ++153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2 ++4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193 ++64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193 ++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 ++13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165 ++174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81 ++6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153 ++156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2 ++4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161 ++90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 ++90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 ++101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8 ++2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158 ++174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37 ++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153 ++158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2 ++4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161 ++37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 ++90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 ++90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7 ++5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154 ++167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37 ++6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154 ++163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2 ++4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151 ++18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193 ++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 ++90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141 ++13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5 ++3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158 ++174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63 ++4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 
[several hundred additional rows of clut224 RGB pixel values for the embedded boot-logo image omitted]
+diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
+index 443e3c8..c443d6a 100644
+--- a/drivers/video/nvidia/nv_backlight.c
++++ b/drivers/video/nvidia/nv_backlight.c
+@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops nvidia_bl_ops = {
++static const struct backlight_ops nvidia_bl_ops = {
+ .get_brightness = nvidia_bl_get_brightness,
+ .update_status = nvidia_bl_update_status,
+ };
+diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
+index d94c57f..912984c 100644
+--- a/drivers/video/riva/fbdev.c
++++ b/drivers/video/riva/fbdev.c
+@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops riva_bl_ops = {
++static const struct backlight_ops riva_bl_ops = {
+ .get_brightness = riva_bl_get_brightness,
+ .update_status = riva_bl_update_status,
+ };
+diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
+index 54fbb29..37bab4b 100644
+--- a/drivers/video/uvesafb.c
++++ b/drivers/video/uvesafb.c
+@@ -18,6 +18,7 @@
+ #include <linux/fb.h>
+ #include <linux/io.h>
+ #include <linux/mutex.h>
++#include <linux/moduleloader.h>
+ #include <video/edid.h>
+ #include <video/uvesafb.h>
+ #ifdef CONFIG_X86
+@@ -72,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
+ struct uvesafb_task *utask;
+ struct uvesafb_ktask *task;
+
+- if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
++ if (!capable(CAP_SYS_ADMIN))
+ return;
+
+ if (msg->seq >= UVESAFB_TASKS_MAX)
+@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
+ NULL,
+ };
+
+- return call_usermodehelper(v86d_path, argv, envp, 1);
++ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
+ }
+
+ /*
+@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
+ if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
+ par->pmi_setpal = par->ypan = 0;
+ } else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
++#endif
++ if (!par->pmi_code) {
++ par->pmi_setpal = par->ypan = 0;
++ return 0;
++ }
++#endif
++
+ par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+ + task->t.regs.edi);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
++ pax_close_kernel();
++
++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
++#else
+ par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
+ par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
++#endif
++
+ printk(KERN_INFO "uvesafb: protected mode interface info at "
+ "%04x:%04x\n",
+ (u16)task->t.regs.es, (u16)task->t.regs.edi);
+@@ -1799,6 +1822,11 @@ out:
+ if (par->vbe_modes)
+ kfree(par->vbe_modes);
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ framebuffer_release(info);
+ return err;
+ }
+@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
+ kfree(par->vbe_state_orig);
+ if (par->vbe_state_saved)
+ kfree(par->vbe_state_saved);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ }
+
+ framebuffer_release(info);
+diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
+index bd37ee1..cb827e8 100644
+--- a/drivers/video/vesafb.c
++++ b/drivers/video/vesafb.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
+ static int vram_total __initdata; /* Set total amount of memory */
+ static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
+ static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
+-static void (*pmi_start)(void) __read_mostly;
+-static void (*pmi_pal) (void) __read_mostly;
++static void (*pmi_start)(void) __read_only;
++static void (*pmi_pal) (void) __read_only;
+ static int depth __read_mostly;
+ static int vga_compat __read_mostly;
+ /* --------------------------------------------------------------------- */
+@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
+ unsigned int size_vmode;
+ unsigned int size_remap;
+ unsigned int size_total;
++ void *pmi_code = NULL;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+ return -ENODEV;
+@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
+ size_remap = size_total;
+ vesafb_fix.smem_len = size_remap;
+
+-#ifndef __i386__
+- screen_info.vesapm_seg = 0;
+-#endif
+-
+ if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+ printk(KERN_WARNING
+ "vesafb: cannot reserve video memory at 0x%lx\n",
+@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
+ printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
+
++#ifdef __i386__
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_code = module_alloc_exec(screen_info.vesapm_size);
++ if (!pmi_code)
++#elif !defined(CONFIG_PAX_KERNEXEC)
++ if (0)
++#endif
++
++#endif
++ screen_info.vesapm_seg = 0;
++
+ if (screen_info.vesapm_seg) {
+- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+- screen_info.vesapm_seg,screen_info.vesapm_off);
++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
+ }
+
+ if (screen_info.vesapm_seg < 0xc000)
+@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
+
+ if (ypan || pmi_setpal) {
+ unsigned short *pmi_base;
++
+ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
++#else
++ pmi_code = pmi_base;
++#endif
++
++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_start = ktva_ktla(pmi_start);
++ pmi_pal = ktva_ktla(pmi_pal);
++ pax_close_kernel();
++#endif
++
+ printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+ if (pmi_base[3]) {
+ printk(KERN_INFO "vesafb: pmi: ports = ");
+@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
+ info->node, info->fix.id);
+ return 0;
+ err:
++
++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ module_free_exec(NULL, pmi_code);
++#endif
++
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
+index 88a60e0..6783cc2 100644
+--- a/drivers/xen/sys-hypervisor.c
++++ b/drivers/xen/sys-hypervisor.c
+@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
+ return 0;
+ }
+
+-static struct sysfs_ops hyp_sysfs_ops = {
++static const struct sysfs_ops hyp_sysfs_ops = {
+ .show = hyp_sysfs_show,
+ .store = hyp_sysfs_store,
+ };
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 18f74ec..3227009 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ static void
+ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
+ IS_ERR(s) ? "<error>" : s);
+diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
+index bb4cc5b..df5eaa0 100644
+--- a/fs/Kconfig.binfmt
++++ b/fs/Kconfig.binfmt
+@@ -86,7 +86,7 @@ config HAVE_AOUT
+
+ config BINFMT_AOUT
+ tristate "Kernel support for a.out and ECOFF binaries"
+- depends on HAVE_AOUT
++ depends on HAVE_AOUT && BROKEN
+ ---help---
+ A.out (Assembler.OUTput) is a set of formats for libraries and
+ executables used in the earliest versions of UNIX. Linux used
+diff --git a/fs/aio.c b/fs/aio.c
+index 22a19ad..d484e5b 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+ size += sizeof(struct io_event) * nr_events;
+ nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+- if (nr_pages < 0)
++ if (nr_pages <= 0)
+ return -EINVAL;
+
+ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
+ struct aio_timeout to;
+ int retry = 0;
+
++ pax_track_stack();
++
+ /* needed to zero any padding within an entry (there shouldn't be
+ * any, but C is fun!
+ */
+@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
+ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
+ {
+ ssize_t ret;
++ struct iovec iovstack;
+
+ ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
+ kiocb->ki_nbytes, 1,
+- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
++ &iovstack, &kiocb->ki_iovec);
+ if (ret < 0)
+ goto out;
+
++ if (kiocb->ki_iovec == &iovstack) {
++ kiocb->ki_inline_vec = iovstack;
++ kiocb->ki_iovec = &kiocb->ki_inline_vec;
++ }
+ kiocb->ki_nr_segs = kiocb->ki_nbytes;
+ kiocb->ki_cur_seg = 0;
+ /* ki_nbytes/left now reflect bytes instead of segs */
+diff --git a/fs/attr.c b/fs/attr.c
+index 96d394b..33cf5b4 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
+ unsigned long limit;
+
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
+index b4ea829..e63ef18 100644
+--- a/fs/autofs4/symlink.c
++++ b/fs/autofs4/symlink.c
+@@ -15,7 +15,7 @@
+ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
+ {
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+- nd_set_link(nd, (char *)ino->u.symlink);
++ nd_set_link(nd, ino->u.symlink);
+ return NULL;
+ }
+
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index 136a0d6..a287331 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
+ {
+ unsigned long sigpipe, flags;
+ mm_segment_t fs;
+- const char *data = (const char *)addr;
++ const char __user *data = (const char __force_user *)addr;
+ ssize_t wr = 0;
+
+ /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
+diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
+index 9158c07..3f06659 100644
+--- a/fs/befs/linuxvfs.c
++++ b/fs/befs/linuxvfs.c
+@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+ befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
+index 0133b5a..3710d09 100644
+--- a/fs/binfmt_aout.c
++++ b/fs/binfmt_aout.c
+@@ -16,6 +16,7 @@
+ #include <linux/string.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ #include <linux/ptrace.h>
+@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
+ #endif
+ # define START_STACK(u) (u.start_stack)
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
+
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
+ dump.u_dsize = 0;
+
+ /* Make sure we have enough room to write the stack and data areas. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
+ if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
+ dump.u_ssize = 0;
+
+@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
+ dump_size = dump.u_ssize << PAGE_SHIFT;
+ DUMP_WRITE(dump_start,dump_size);
+ }
+-/* Finally dump the task struct. Not be used by gdb, but could be useful */
+- set_fs(KERNEL_DS);
+- DUMP_WRITE(current,sizeof(*current));
++/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
+ end_coredump:
+ set_fs(fs);
+ return has_dumped;
+@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
++
++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+@@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ current->mm->free_area_cache = current->mm->mmap_base;
+ current->mm->cached_hole_size = 0;
+
++ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
++ if (retval < 0) {
++ /* Someone check-me: is this error path enough? */
++ send_sig(SIGKILL, current, 0);
++ return retval;
++ }
++
+ install_exec_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->mm->pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ if (N_MAGIC(ex) == OMAGIC) {
+ unsigned long text_addr, map_size;
+ loff_t pos;
+@@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+@@ -367,13 +400,6 @@ beyond_if:
+ return retval;
+ }
+
+- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+- if (retval < 0) {
+- /* Someone check-me: is this error path enough? */
+- send_sig(SIGKILL, current, 0);
+- return retval;
+- }
+-
+ current->mm->start_stack =
+ (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
+ #ifdef __alpha__
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index a64fde6..7ce2817 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -31,6 +31,7 @@
+ #include <linux/random.h>
+ #include <linux/elf.h>
+ #include <linux/utsname.h>
++#include <linux/xattr.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+ #define elf_core_dump NULL
+ #endif
+
++#ifdef CONFIG_PAX_MPROTECT
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
++#endif
++
+ #if ELF_EXEC_PAGESIZE > PAGE_SIZE
+ #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+ #else
+@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
+ .load_binary = load_elf_binary,
+ .load_shlib = load_elf_library,
+ .core_dump = elf_core_dump,
++
++#ifdef CONFIG_PAX_MPROTECT
++ .handle_mprotect= elf_handle_mprotect,
++#endif
++
+ .min_coredump = ELF_EXEC_PAGESIZE,
+ .hasvdso = 1
+ };
+@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++ unsigned long e = end;
++
+ start = ELF_PAGEALIGN(start);
+ end = ELF_PAGEALIGN(end);
+ if (end > start) {
+@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
+ if (BAD_ADDR(addr))
+ return addr;
+ }
+- current->mm->start_brk = current->mm->brk = end;
++ current->mm->start_brk = current->mm->brk = e;
+ return 0;
+ }
+
+@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+ elf_addr_t __user *u_rand_bytes;
+ const char *k_platform = ELF_PLATFORM;
+ const char *k_base_platform = ELF_BASE_PLATFORM;
+- unsigned char k_rand_bytes[16];
++ u32 k_rand_bytes[4];
+ int items;
+ elf_addr_t *elf_info;
+ int ei_index = 0;
+ const struct cred *cred = current_cred();
+ struct vm_area_struct *vma;
++ unsigned long saved_auxv[AT_VECTOR_SIZE];
++
++ pax_track_stack();
+
+ /*
+ * In some cases (e.g. Hyper-Threading), we want to avoid L1
+@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+ * Generate 16 random bytes for userspace PRNG seeding.
+ */
+ get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+- u_rand_bytes = (elf_addr_t __user *)
+- STACK_ALLOC(p, sizeof(k_rand_bytes));
++ srandom32(k_rand_bytes[0] ^ random32());
++ srandom32(k_rand_bytes[1] ^ random32());
++ srandom32(k_rand_bytes[2] ^ random32());
++ srandom32(k_rand_bytes[3] ^ random32());
++ p = STACK_ROUND(p, sizeof(k_rand_bytes));
++ u_rand_bytes = (elf_addr_t __user *) p;
+ if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+ return -EFAULT;
+
+@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+ return -EFAULT;
+ current->mm->env_end = p;
+
++ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
++
+ /* Put the elf_info on the stack in the right place. */
+ sp = (elf_addr_t __user *)envp + 1;
+- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
++ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
+ return -EFAULT;
+ return 0;
+ }
+@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ {
+ struct elf_phdr *elf_phdata;
+ struct elf_phdr *eppnt;
+- unsigned long load_addr = 0;
++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
+ int load_addr_set = 0;
+ unsigned long last_bss = 0, elf_bss = 0;
+- unsigned long error = ~0UL;
++ unsigned long error = -EINVAL;
+ unsigned long total_size;
+ int retval, i, size;
+
+@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ goto out_close;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+ eppnt = elf_phdata;
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ k = load_addr + eppnt->p_vaddr;
+ if (BAD_ADDR(k) ||
+ eppnt->p_filesz > eppnt->p_memsz ||
+- eppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - eppnt->p_memsz < k) {
++ eppnt->p_memsz > pax_task_size ||
++ pax_task_size - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+@@ -532,6 +558,351 @@ out:
+ return error;
+ }
+
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if (nx_enabled)
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if (nx_enabled)
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_EI_PAX
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if (nx_enabled)
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#else
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ pax_flags |= MF_PAX_SEGMEXEC;
++ }
++#endif
++
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
++ return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
++ break;
++ }
++#endif
++
++ return ~0UL;
++}
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (pax_flags_softmode & MF_PAX_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_xattr_pax(struct file * const file)
++{
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ ssize_t xattr_size, i;
++ unsigned char xattr_value[5];
++ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
++
++ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
++ if (xattr_size <= 0)
++ return ~0UL;
++
++ for (i = 0; i < xattr_size; i++)
++ switch (xattr_value[i]) {
++ default:
++ return ~0UL;
++
++#define parse_flag(option1, option2, flag) \
++ case option1: \
++ pax_flags_hardmode |= MF_PAX_##flag; \
++ break; \
++ case option2: \
++ pax_flags_softmode |= MF_PAX_##flag; \
++ break;
++
++ parse_flag('p', 'P', PAGEEXEC);
++ parse_flag('e', 'E', EMUTRAMP);
++ parse_flag('m', 'M', MPROTECT);
++ parse_flag('r', 'R', RANDMMAP);
++ parse_flag('s', 'S', SEGMEXEC);
++
++#undef parse_flag
++ }
++
++ if (pax_flags_hardmode & pax_flags_softmode)
++ return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
++ else
++#endif
++
++ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
++#else
++ return ~0UL;
++#endif
++
++}
++
++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
++{
++ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
++
++ pax_flags = pax_parse_ei_pax(elf_ex);
++ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
++ xattr_pax_flags = pax_parse_xattr_pax(file);
++
++ if (pt_pax_flags == ~0UL)
++ pt_pax_flags = xattr_pax_flags;
++ else if (xattr_pax_flags == ~0UL)
++ xattr_pax_flags = pt_pax_flags;
++ if (pt_pax_flags != xattr_pax_flags)
++ return -EINVAL;
++ if (pt_pax_flags != ~0UL)
++ pax_flags = pt_pax_flags;
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->mm->pax_flags = pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+ unsigned int random_variable = 0;
+
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ return stack_top - current->mm->delta_stack;
++#endif
++
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
+ random_variable = get_random_int() & STACK_RND_MASK;
+@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ unsigned long load_addr = 0, load_bias = 0;
+ int load_addr_set = 0;
+ char * elf_interpreter = NULL;
+- unsigned long error;
++ unsigned long error = 0;
+ struct elf_phdr *elf_ppnt, *elf_phdata;
+ unsigned long elf_bss, elf_brk;
+ int retval, i;
+@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long reloc_func_desc = 0;
+ int executable_stack = EXSTACK_DEFAULT;
+- unsigned long def_flags = 0;
+ struct {
+ struct elfhdr elf_ex;
+ struct elfhdr interp_elf_ex;
+ } *loc;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+
+ /* OK, This is the point of no return */
+ current->flags &= ~PF_FORKNOEXEC;
+- current->mm->def_flags = def_flags;
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
++ current->mm->def_flags = 0;
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_initial_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_initial_flags_func)
++ (pax_set_initial_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
++ current->mm->context.user_cs_limit = PAGE_SIZE;
++ current->mm->def_flags |= VM_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++ }
++#endif
++
++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
++ put_cpu();
++ }
++#endif
+
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ executable_stack = EXSTACK_DISABLE_X;
++ current->personality &= ~READ_IMPLIES_EXEC;
++ } else
++#endif
++
+ if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ * might try to exec. This is because the brk will
+ * follow the loader, and is not movable. */
+ #ifdef CONFIG_X86
+- load_bias = 0;
++ if (current->flags & PF_RANDOMIZE)
++ load_bias = 0;
++ else
++ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
++#ifdef CONFIG_SPARC64
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
++#else
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
++#endif
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
++ elf_flags |= MAP_FIXED;
++ }
++#endif
++
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ * allowed task size. Note that p_filesz must always be
+ * <= p_memsz so it is only necessary to check p_memsz.
+ */
+- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+- elf_ppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - elf_ppnt->p_memsz < k) {
++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++ elf_ppnt->p_memsz > pax_task_size ||
++ pax_task_size - elf_ppnt->p_memsz < k) {
+ /* set_brk can never work. Avoid overflows. */
+ send_sig(SIGKILL, current, 0);
+ retval = -EINVAL;
+@@ -877,11 +1339,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ goto out_free_dentry;
+ }
+ if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+- send_sig(SIGSEGV, current, 0);
+- retval = -EFAULT; /* Nobody gets to see this, but.. */
+- goto out_free_dentry;
++ /*
++ * This bss-zeroing can fail if the ELF
++ * file specifies odd protections. So
++ * we don't check the return value
++ */
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ unsigned long start, size;
++
++ start = ELF_PAGEALIGN(elf_brk);
++ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
++ down_write(&current->mm->mmap_sem);
++ retval = -ENOMEM;
++ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
++ unsigned long prot = PROT_NONE;
++
++ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
++// if (current->personality & ADDR_NO_RANDOMIZE)
++// prot = PROT_READ;
++ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
++ retval = IS_ERR_VALUE(start) ? start : 0;
++ }
++ up_write(&current->mm->mmap_sem);
++ if (retval == 0)
++ retval = set_brk(start + size, start + size + PAGE_SIZE);
++ if (retval < 0) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++ }
++#endif
++
+ if (elf_interpreter) {
+ unsigned long uninitialized_var(interp_map_addr);
+
+@@ -1112,8 +1603,10 @@ static int dump_seek(struct file *file, loff_t off)
+ unsigned long n = off;
+ if (n > PAGE_SIZE)
+ n = PAGE_SIZE;
+- if (!dump_write(file, buf, n))
++ if (!dump_write(file, buf, n)) {
++ free_page((unsigned long)buf);
+ return 0;
++ }
+ off -= n;
+ }
+ free_page((unsigned long)buf);
+@@ -1125,7 +1618,7 @@ static int dump_seek(struct file *file, loff_t off)
+ * Decide what to dump of a segment, part, all or none.
+ */
+ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+- unsigned long mm_flags)
++ unsigned long mm_flags, long signr)
+ {
+ #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
+
+@@ -1159,7 +1652,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+ if (vma->vm_file == NULL)
+ return 0;
+
+- if (FILTER(MAPPED_PRIVATE))
++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
+ goto whole;
+
+ /*
+@@ -1255,8 +1748,11 @@ static int writenote(struct memelfnote *men, struct file *file,
+ #undef DUMP_WRITE
+
+ #define DUMP_WRITE(addr, nr) \
++ do { \
++ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
+ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
+- goto end_coredump;
++ goto end_coredump; \
++ } while (0);
+
+ static void fill_elf_header(struct elfhdr *elf, int segs,
+ u16 machine, u32 flags, u8 osabi)
+@@ -1385,9 +1881,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+ {
+ elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+ int i = 0;
+- do
++ do {
+ i += 2;
+- while (auxv[i - 2] != AT_NULL);
++ } while (auxv[i - 2] != AT_NULL);
+ fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ }
+
+@@ -1973,7 +2469,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+ phdr.p_offset = offset;
+ phdr.p_vaddr = vma->vm_start;
+ phdr.p_paddr = 0;
+- phdr.p_filesz = vma_dump_size(vma, mm_flags);
++ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
+ phdr.p_memsz = vma->vm_end - vma->vm_start;
+ offset += phdr.p_filesz;
+ phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+@@ -2006,7 +2502,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+ unsigned long addr;
+ unsigned long end;
+
+- end = vma->vm_start + vma_dump_size(vma, mm_flags);
++ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
+
+ for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+@@ -2015,6 +2511,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+ page = get_dump_page(addr);
+ if (page) {
+ void *kaddr = kmap(page);
++ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
+ stop = ((size += PAGE_SIZE) > limit) ||
+ !dump_write(file, kaddr, PAGE_SIZE);
+ kunmap(page);
+@@ -2042,6 +2539,97 @@ out:
+
+ #endif /* USE_ELF_CORE_DUMP */
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
++ * we'll remove VM_MAYWRITE for good on RELRO segments.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p;
++ unsigned long i;
++ unsigned long oldflags;
++ bool is_textrel_rw, is_textrel_rx, is_relro;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
++ return;
++
++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
++
++#ifdef CONFIG_PAX_ELFRELOCS
++ /* possible TEXTREL */
++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++#else
++ is_textrel_rw = false;
++ is_textrel_rx = false;
++#endif
++
++ /* possible RELRO */
++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++
++ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
++ return;
++
++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++#else
++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
++#endif
++
++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++ return;
++ switch (elf_p.p_type) {
++ case PT_DYNAMIC:
++ if (!is_textrel_rw && !is_textrel_rx)
++ continue;
++ i = 0UL;
++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
++ elf_dyn dyn;
++
++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
++ return;
++ if (dyn.d_tag == DT_NULL)
++ return;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ gr_log_textrel(vma);
++ if (is_textrel_rw)
++ vma->vm_flags |= VM_MAYWRITE;
++ else
++ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... 
*/ ++ vma->vm_flags &= ~VM_MAYWRITE; ++ return; ++ } ++ i++; ++ } ++ return; ++ ++ case PT_GNU_RELRO: ++ if (!is_relro) ++ continue; ++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start) ++ vma->vm_flags &= ~VM_MAYWRITE; ++ return; ++ } ++ } ++} ++#endif ++ + static int __init init_elf_binfmt(void) + { + return register_binfmt(&elf_format); +diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c +index ca88c46..f155a60 100644 +--- a/fs/binfmt_flat.c ++++ b/fs/binfmt_flat.c +@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm, + realdatastart = (unsigned long) -ENOMEM; + printk("Unable to allocate RAM for process data, errno %d\n", + (int)-realdatastart); ++ down_write(¤t->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len); ++ up_write(¤t->mm->mmap_sem); + ret = realdatastart; + goto err; + } +@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm, + } + if (IS_ERR_VALUE(result)) { + printk("Unable to read data+bss, errno %d\n", (int)-result); ++ down_write(¤t->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len); + do_munmap(current->mm, realdatastart, data_len + extra); ++ up_write(¤t->mm->mmap_sem); + ret = result; + goto err; + } +@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm, + } + if (IS_ERR_VALUE(result)) { + printk("Unable to read code+data+bss, errno %d\n",(int)-result); ++ down_write(¤t->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len + data_len + extra + + MAX_SHARED_LIBS * sizeof(unsigned long)); ++ up_write(¤t->mm->mmap_sem); + ret = result; + goto err; + } +diff --git a/fs/bio.c b/fs/bio.c +index e696713..4b5969d 100644 +--- a/fs/bio.c ++++ b/fs/bio.c +@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) + + i = 0; + while (i < bio_slab_nr) { +- struct bio_slab *bslab = &bio_slabs[i]; ++ bslab = &bio_slabs[i]; + + if (!bslab->slab && entry == -1) + entry = i; +@@ -841,7 +841,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, + /* + * Overflow, abort + */ +- if (end < start) ++ if (end < start || end - start > INT_MAX - nr_pages) + return ERR_PTR(-EINVAL); + + nr_pages += end - start; +@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err) + const int read = bio_data_dir(bio) == READ; + struct bio_map_data *bmd = bio->bi_private; + int i; +- char *p = bmd->sgvecs[0].iov_base; ++ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base; + + __bio_for_each_segment(bvec, bio, i, 0) { + char *addr = page_address(bvec->bv_page); +diff --git a/fs/block_dev.c b/fs/block_dev.c +index e65efa2..04fae57 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder) + else if (bdev->bd_contains == bdev) + res = 0; /* is a whole device which isn't held */ + +- else if (bdev->bd_contains->bd_holder == bd_claim) ++ else if (bdev->bd_contains->bd_holder == (void *)bd_claim) + res = 0; /* is a partition of a device that is being partitioned */ + else if (bdev->bd_contains->bd_holder != NULL) + res = -EBUSY; /* is a partition of a held device */ +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index c4bc570..42acd8d 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, + free_extent_buffer(buf); + add_root_to_dirty_list(root); + } else { +- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) +- parent_start = 
parent->start; +- else ++ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { ++ if (parent) ++ parent_start = parent->start; ++ else ++ parent_start = 0; ++ } else + parent_start = 0; + + WARN_ON(trans->transid != btrfs_header_generation(parent)); +@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans, + + ret = 0; + if (slot == 0) { +- struct btrfs_disk_key disk_key; + btrfs_cpu_key_to_disk(&disk_key, cpu_key); + ret = fixup_low_keys(trans, root, path, &disk_key, 1); + } +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index f447188..59c17c5 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -39,7 +39,7 @@ + #include "tree-log.h" + #include "free-space-cache.h" + +-static struct extent_io_ops btree_extent_io_ops; ++static const struct extent_io_ops btree_extent_io_ops; + static void end_workqueue_fn(struct btrfs_work *work); + static void free_fs_root(struct btrfs_root *root); + +@@ -2607,7 +2607,7 @@ out: + return 0; + } + +-static struct extent_io_ops btree_extent_io_ops = { ++static const struct extent_io_ops btree_extent_io_ops = { + .write_cache_pages_lock_hook = btree_lock_page_hook, + .readpage_end_io_hook = btree_readpage_end_io_hook, + .submit_bio_hook = btree_submit_bio_hook, +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 559f724..a026171 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, + u64 group_start = group->key.objectid; + new_extents = kmalloc(sizeof(*new_extents), + GFP_NOFS); ++ if (!new_extents) { ++ ret = -ENOMEM; ++ goto out; ++ } + nr_extents = 1; + ret = get_new_locations(reloc_inode, + extent_key, +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h +index 36de250..7ec75c7 100644 +--- a/fs/btrfs/extent_io.h ++++ b/fs/btrfs/extent_io.h +@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, + struct bio *bio, int mirror_num, + unsigned long bio_flags); + struct extent_io_ops { +- int (*fill_delalloc)(struct inode *inode, struct page *locked_page, ++ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written); +- int (*writepage_start_hook)(struct page *page, u64 start, u64 end); +- int (*writepage_io_hook)(struct page *page, u64 start, u64 end); ++ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end); ++ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end); + extent_submit_bio_hook_t *submit_bio_hook; +- int (*merge_bio_hook)(struct page *page, unsigned long offset, ++ int (* const merge_bio_hook)(struct page *page, unsigned long offset, + size_t size, struct bio *bio, + unsigned long bio_flags); +- int (*readpage_io_hook)(struct page *page, u64 start, u64 end); +- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, ++ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end); ++ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page, + u64 start, u64 end, + struct extent_state *state); +- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, ++ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page, + u64 start, u64 end, + struct extent_state *state); +- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, ++ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end, + struct extent_state *state); +- int 
(*writepage_end_io_hook)(struct page *page, u64 start, u64 end, ++ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end, + struct extent_state *state, int uptodate); +- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, ++ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end, + unsigned long old, unsigned long bits); +- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state, ++ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state, + unsigned long bits); +- int (*merge_extent_hook)(struct inode *inode, ++ int (* const merge_extent_hook)(struct inode *inode, + struct extent_state *new, + struct extent_state *other); +- int (*split_extent_hook)(struct inode *inode, ++ int (* const split_extent_hook)(struct inode *inode, + struct extent_state *orig, u64 split); +- int (*write_cache_pages_lock_hook)(struct page *page); ++ int (* const write_cache_pages_lock_hook)(struct page *page); + }; + + struct extent_io_tree { +@@ -88,7 +88,7 @@ struct extent_io_tree { + u64 dirty_bytes; + spinlock_t lock; + spinlock_t buffer_lock; +- struct extent_io_ops *ops; ++ const struct extent_io_ops *ops; + }; + + struct extent_state { +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c +index cb2849f..3718fb4 100644 +--- a/fs/btrfs/free-space-cache.c ++++ b/fs/btrfs/free-space-cache.c +@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, + + while(1) { + if (entry->bytes < bytes || entry->offset < min_start) { +- struct rb_node *node; +- + node = rb_next(&entry->offset_index); + if (!node) + break; +@@ -1226,7 +1224,7 @@ again: + */ + while (entry->bitmap || found_bitmap || + (!entry->bitmap && entry->bytes < min_bytes)) { +- struct rb_node *node = rb_next(&entry->offset_index); ++ node = rb_next(&entry->offset_index); + + if (entry->bitmap && entry->bytes > bytes + empty_size) { + ret = btrfs_bitmap_cluster(block_group, entry, cluster, +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index e03a836..323837e 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations; + static const struct address_space_operations btrfs_aops; + static const struct address_space_operations btrfs_symlink_aops; + static const struct file_operations btrfs_dir_file_operations; +-static struct extent_io_ops btrfs_extent_io_ops; ++static const struct extent_io_ops btrfs_extent_io_ops; + + static struct kmem_cache *btrfs_inode_cachep; + struct kmem_cache *btrfs_trans_handle_cachep; +@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, + 1, 0, NULL, GFP_NOFS); + while (start < end) { + async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); ++ BUG_ON(!async_cow); + async_cow->inode = inode; + async_cow->root = root; + async_cow->locked_page = locked_page; +@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path, + inline_size = btrfs_file_extent_inline_item_len(leaf, + btrfs_item_nr(leaf, path->slots[0])); + tmp = kmalloc(inline_size, GFP_NOFS); ++ if (!tmp) ++ return -ENOMEM; + ptr = btrfs_file_extent_inline_start(item); + + read_extent_buffer(leaf, tmp, ptr, inline_size); +@@ -5410,7 +5413,7 @@ fail: + return -ENOMEM; + } + +-static int btrfs_getattr(struct vfsmount *mnt, ++int btrfs_getattr(struct vfsmount *mnt, + struct dentry *dentry, struct kstat *stat) + { + struct inode *inode = dentry->d_inode; +@@ -5422,6 +5425,14 @@ static int 
btrfs_getattr(struct vfsmount *mnt, + return 0; + } + ++EXPORT_SYMBOL(btrfs_getattr); ++ ++dev_t get_btrfs_dev_from_inode(struct inode *inode) ++{ ++ return BTRFS_I(inode)->root->anon_super.s_dev; ++} ++EXPORT_SYMBOL(get_btrfs_dev_from_inode); ++ + static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) + { +@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = { + .fsync = btrfs_sync_file, + }; + +-static struct extent_io_ops btrfs_extent_io_ops = { ++static const struct extent_io_ops btrfs_extent_io_ops = { + .fill_delalloc = run_delalloc_range, + .submit_bio_hook = btrfs_submit_bio_hook, + .merge_bio_hook = btrfs_merge_bio_hook, +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index ab7ab53..94e0781 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del) + } + spin_unlock(&rc->reloc_root_tree.lock); + +- BUG_ON((struct btrfs_root *)node->data != root); ++ BUG_ON(!node || (struct btrfs_root *)node->data != root); + + if (!del) { + spin_lock(&rc->reloc_root_tree.lock); +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c +index a240b6f..4ce16ef 100644 +--- a/fs/btrfs/sysfs.c ++++ b/fs/btrfs/sysfs.c +@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj) + complete(&root->kobj_unregister); + } + +-static struct sysfs_ops btrfs_super_attr_ops = { ++static const struct sysfs_ops btrfs_super_attr_ops = { + .show = btrfs_super_attr_show, + .store = btrfs_super_attr_store, + }; + +-static struct sysfs_ops btrfs_root_attr_ops = { ++static const struct sysfs_ops btrfs_root_attr_ops = { + .show = btrfs_root_attr_show, + .store = btrfs_root_attr_store, + }; +diff --git a/fs/buffer.c b/fs/buffer.c +index 6fa5302..395d9f6 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -25,6 +25,7 @@ + #include <linux/percpu.h> + #include <linux/slab.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include <linux/blkdev.h> + #include <linux/file.h> + #include <linux/quotaops.h> +diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c +index 3797e00..ce776f6 100644 +--- a/fs/cachefiles/bind.c ++++ b/fs/cachefiles/bind.c +@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) + args); + + /* start by checking things over */ +- ASSERT(cache->fstop_percent >= 0 && +- cache->fstop_percent < cache->fcull_percent && ++ ASSERT(cache->fstop_percent < cache->fcull_percent && + cache->fcull_percent < cache->frun_percent && + cache->frun_percent < 100); + +- ASSERT(cache->bstop_percent >= 0 && +- cache->bstop_percent < cache->bcull_percent && ++ ASSERT(cache->bstop_percent < cache->bcull_percent && + cache->bcull_percent < cache->brun_percent && + cache->brun_percent < 100); + +diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c +index 4618516..bb30d01 100644 +--- a/fs/cachefiles/daemon.c ++++ b/fs/cachefiles/daemon.c +@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file, + if (test_bit(CACHEFILES_DEAD, &cache->flags)) + return -EIO; + +- if (datalen < 0 || datalen > PAGE_SIZE - 1) ++ if (datalen > PAGE_SIZE - 1) + return -EOPNOTSUPP; + + /* drag the command string into the kernel so we can parse it */ +@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args) + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (fstop < 0 || fstop >= cache->fcull_percent) ++ if 
(fstop >= cache->fcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->fstop_percent = fstop; +@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args) + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (bstop < 0 || bstop >= cache->bcull_percent) ++ if (bstop >= cache->bcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->bstop_percent = bstop; +diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h +index f7c255f..fcd61de 100644 +--- a/fs/cachefiles/internal.h ++++ b/fs/cachefiles/internal.h +@@ -56,7 +56,7 @@ struct cachefiles_cache { + wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */ + struct rb_root active_nodes; /* active nodes (can't be culled) */ + rwlock_t active_lock; /* lock for active_nodes */ +- atomic_t gravecounter; /* graveyard uniquifier */ ++ atomic_unchecked_t gravecounter; /* graveyard uniquifier */ + unsigned frun_percent; /* when to stop culling (% files) */ + unsigned fcull_percent; /* when to start culling (% files) */ + unsigned fstop_percent; /* when to stop allocating (% files) */ +@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache, + * proc.c + */ + #ifdef CONFIG_CACHEFILES_HISTOGRAM +-extern atomic_t cachefiles_lookup_histogram[HZ]; +-extern atomic_t cachefiles_mkdir_histogram[HZ]; +-extern atomic_t cachefiles_create_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_create_histogram[HZ]; + + extern int __init cachefiles_proc_init(void); + extern void cachefiles_proc_cleanup(void); + static inline +-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif) ++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif) + { + unsigned long jif = jiffies - start_jif; + if (jif >= HZ) + jif = HZ - 1; +- atomic_inc(&histogram[jif]); ++ atomic_inc_unchecked(&histogram[jif]); + } + + #else +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c +index 14ac480..a62766c 100644 +--- a/fs/cachefiles/namei.c ++++ b/fs/cachefiles/namei.c +@@ -250,7 +250,7 @@ try_again: + /* first step is to make up a grave dentry in the graveyard */ + sprintf(nbuffer, "%08x%08x", + (uint32_t) get_seconds(), +- (uint32_t) atomic_inc_return(&cache->gravecounter)); ++ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter)); + + /* do the multiway lock magic */ + trap = lock_rename(cache->graveyard, dir); +diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c +index eccd339..4c1d995 100644 +--- a/fs/cachefiles/proc.c ++++ b/fs/cachefiles/proc.c +@@ -14,9 +14,9 @@ + #include <linux/seq_file.h> + #include "internal.h" + +-atomic_t cachefiles_lookup_histogram[HZ]; +-atomic_t cachefiles_mkdir_histogram[HZ]; +-atomic_t cachefiles_create_histogram[HZ]; ++atomic_unchecked_t cachefiles_lookup_histogram[HZ]; ++atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; ++atomic_unchecked_t cachefiles_create_histogram[HZ]; + + /* + * display the latency histogram +@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v) + return 0; + default: + index = (unsigned long) v - 3; +- x = atomic_read(&cachefiles_lookup_histogram[index]); +- y = atomic_read(&cachefiles_mkdir_histogram[index]); +- z = atomic_read(&cachefiles_create_histogram[index]); ++ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]); ++ y = 
atomic_read_unchecked(&cachefiles_mkdir_histogram[index]); ++ z = atomic_read_unchecked(&cachefiles_create_histogram[index]); + if (x == 0 && y == 0 && z == 0) + return 0; + +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index a6c8c6f..5cf8517 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = file->f_op->write( +- file, (const void __user *) data, len, &pos); ++ file, (const void __force_user *) data, len, &pos); + set_fs(old_fs); + kunmap(page); + if (ret != len) +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c +index 42cec2a..2aba466 100644 +--- a/fs/cifs/cifs_debug.c ++++ b/fs/cifs/cifs_debug.c +@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file, + tcon = list_entry(tmp3, + struct cifsTconInfo, + tcon_list); +- atomic_set(&tcon->num_smbs_sent, 0); +- atomic_set(&tcon->num_writes, 0); +- atomic_set(&tcon->num_reads, 0); +- atomic_set(&tcon->num_oplock_brks, 0); +- atomic_set(&tcon->num_opens, 0); +- atomic_set(&tcon->num_posixopens, 0); +- atomic_set(&tcon->num_posixmkdirs, 0); +- atomic_set(&tcon->num_closes, 0); +- atomic_set(&tcon->num_deletes, 0); +- atomic_set(&tcon->num_mkdirs, 0); +- atomic_set(&tcon->num_rmdirs, 0); +- atomic_set(&tcon->num_renames, 0); +- atomic_set(&tcon->num_t2renames, 0); +- atomic_set(&tcon->num_ffirst, 0); +- atomic_set(&tcon->num_fnext, 0); +- atomic_set(&tcon->num_fclose, 0); +- atomic_set(&tcon->num_hardlinks, 0); +- atomic_set(&tcon->num_symlinks, 0); +- atomic_set(&tcon->num_locks, 0); ++ atomic_set_unchecked(&tcon->num_smbs_sent, 0); ++ atomic_set_unchecked(&tcon->num_writes, 0); ++ atomic_set_unchecked(&tcon->num_reads, 0); ++ atomic_set_unchecked(&tcon->num_oplock_brks, 0); ++ atomic_set_unchecked(&tcon->num_opens, 0); ++ atomic_set_unchecked(&tcon->num_posixopens, 0); ++ atomic_set_unchecked(&tcon->num_posixmkdirs, 0); ++ atomic_set_unchecked(&tcon->num_closes, 0); ++ atomic_set_unchecked(&tcon->num_deletes, 0); ++ atomic_set_unchecked(&tcon->num_mkdirs, 0); ++ atomic_set_unchecked(&tcon->num_rmdirs, 0); ++ atomic_set_unchecked(&tcon->num_renames, 0); ++ atomic_set_unchecked(&tcon->num_t2renames, 0); ++ atomic_set_unchecked(&tcon->num_ffirst, 0); ++ atomic_set_unchecked(&tcon->num_fnext, 0); ++ atomic_set_unchecked(&tcon->num_fclose, 0); ++ atomic_set_unchecked(&tcon->num_hardlinks, 0); ++ atomic_set_unchecked(&tcon->num_symlinks, 0); ++ atomic_set_unchecked(&tcon->num_locks, 0); + } + } + } +@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) + if (tcon->need_reconnect) + seq_puts(m, "\tDISCONNECTED "); + seq_printf(m, "\nSMBs: %d Oplock Breaks: %d", +- atomic_read(&tcon->num_smbs_sent), +- atomic_read(&tcon->num_oplock_brks)); ++ atomic_read_unchecked(&tcon->num_smbs_sent), ++ atomic_read_unchecked(&tcon->num_oplock_brks)); + seq_printf(m, "\nReads: %d Bytes: %lld", +- atomic_read(&tcon->num_reads), ++ atomic_read_unchecked(&tcon->num_reads), + (long long)(tcon->bytes_read)); + seq_printf(m, "\nWrites: %d Bytes: %lld", +- atomic_read(&tcon->num_writes), ++ atomic_read_unchecked(&tcon->num_writes), + (long long)(tcon->bytes_written)); + seq_printf(m, "\nFlushes: %d", +- atomic_read(&tcon->num_flushes)); ++ atomic_read_unchecked(&tcon->num_flushes)); + seq_printf(m, "\nLocks: %d HardLinks: %d " + "Symlinks: %d", +- atomic_read(&tcon->num_locks), +- atomic_read(&tcon->num_hardlinks), +- 
atomic_read(&tcon->num_symlinks)); ++ atomic_read_unchecked(&tcon->num_locks), ++ atomic_read_unchecked(&tcon->num_hardlinks), ++ atomic_read_unchecked(&tcon->num_symlinks)); + seq_printf(m, "\nOpens: %d Closes: %d " + "Deletes: %d", +- atomic_read(&tcon->num_opens), +- atomic_read(&tcon->num_closes), +- atomic_read(&tcon->num_deletes)); ++ atomic_read_unchecked(&tcon->num_opens), ++ atomic_read_unchecked(&tcon->num_closes), ++ atomic_read_unchecked(&tcon->num_deletes)); + seq_printf(m, "\nPosix Opens: %d " + "Posix Mkdirs: %d", +- atomic_read(&tcon->num_posixopens), +- atomic_read(&tcon->num_posixmkdirs)); ++ atomic_read_unchecked(&tcon->num_posixopens), ++ atomic_read_unchecked(&tcon->num_posixmkdirs)); + seq_printf(m, "\nMkdirs: %d Rmdirs: %d", +- atomic_read(&tcon->num_mkdirs), +- atomic_read(&tcon->num_rmdirs)); ++ atomic_read_unchecked(&tcon->num_mkdirs), ++ atomic_read_unchecked(&tcon->num_rmdirs)); + seq_printf(m, "\nRenames: %d T2 Renames %d", +- atomic_read(&tcon->num_renames), +- atomic_read(&tcon->num_t2renames)); ++ atomic_read_unchecked(&tcon->num_renames), ++ atomic_read_unchecked(&tcon->num_t2renames)); + seq_printf(m, "\nFindFirst: %d FNext %d " + "FClose %d", +- atomic_read(&tcon->num_ffirst), +- atomic_read(&tcon->num_fnext), +- atomic_read(&tcon->num_fclose)); ++ atomic_read_unchecked(&tcon->num_ffirst), ++ atomic_read_unchecked(&tcon->num_fnext), ++ atomic_read_unchecked(&tcon->num_fclose)); + } + } + } +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 1445407..68cb0dc 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -869,7 +869,7 @@ cifs_init_request_bufs(void) + cifs_req_cachep = kmem_cache_create("cifs_request", + CIFSMaxBufSize + + MAX_CIFS_HDR_SIZE, 0, +- SLAB_HWCACHE_ALIGN, NULL); ++ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL); + if (cifs_req_cachep == NULL) + return -ENOMEM; + +@@ -896,7 +896,7 @@ cifs_init_request_bufs(void) + efficient to alloc 1 per page off the slab compared to 17K (5page) + alloc of large cifs buffers even when page debugging is on */ + cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", +- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, ++ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, + NULL); + if (cifs_sm_req_cachep == NULL) { + mempool_destroy(cifs_req_poolp); +@@ -991,8 +991,8 @@ init_cifs(void) + atomic_set(&bufAllocCount, 0); + atomic_set(&smBufAllocCount, 0); + #ifdef CONFIG_CIFS_STATS2 +- atomic_set(&totBufAllocCount, 0); +- atomic_set(&totSmBufAllocCount, 0); ++ atomic_set_unchecked(&totBufAllocCount, 0); ++ atomic_set_unchecked(&totSmBufAllocCount, 0); + #endif /* CONFIG_CIFS_STATS2 */ + + atomic_set(&midCount, 0); +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index e29581e..1c22bab 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -252,28 +252,28 @@ struct cifsTconInfo { + __u16 Flags; /* optional support bits */ + enum statusEnum tidStatus; + #ifdef CONFIG_CIFS_STATS +- atomic_t num_smbs_sent; +- atomic_t num_writes; +- atomic_t num_reads; +- atomic_t num_flushes; +- atomic_t num_oplock_brks; +- atomic_t num_opens; +- atomic_t num_closes; +- atomic_t num_deletes; +- atomic_t num_mkdirs; +- atomic_t num_posixopens; +- atomic_t num_posixmkdirs; +- atomic_t num_rmdirs; +- atomic_t num_renames; +- atomic_t num_t2renames; +- atomic_t num_ffirst; +- atomic_t num_fnext; +- atomic_t num_fclose; +- atomic_t num_hardlinks; +- atomic_t num_symlinks; +- atomic_t num_locks; +- atomic_t num_acl_get; +- atomic_t num_acl_set; ++ atomic_unchecked_t num_smbs_sent; ++ 
atomic_unchecked_t num_writes; ++ atomic_unchecked_t num_reads; ++ atomic_unchecked_t num_flushes; ++ atomic_unchecked_t num_oplock_brks; ++ atomic_unchecked_t num_opens; ++ atomic_unchecked_t num_closes; ++ atomic_unchecked_t num_deletes; ++ atomic_unchecked_t num_mkdirs; ++ atomic_unchecked_t num_posixopens; ++ atomic_unchecked_t num_posixmkdirs; ++ atomic_unchecked_t num_rmdirs; ++ atomic_unchecked_t num_renames; ++ atomic_unchecked_t num_t2renames; ++ atomic_unchecked_t num_ffirst; ++ atomic_unchecked_t num_fnext; ++ atomic_unchecked_t num_fclose; ++ atomic_unchecked_t num_hardlinks; ++ atomic_unchecked_t num_symlinks; ++ atomic_unchecked_t num_locks; ++ atomic_unchecked_t num_acl_get; ++ atomic_unchecked_t num_acl_set; + #ifdef CONFIG_CIFS_STATS2 + unsigned long long time_writes; + unsigned long long time_reads; +@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb) + } + + #ifdef CONFIG_CIFS_STATS +-#define cifs_stats_inc atomic_inc ++#define cifs_stats_inc atomic_inc_unchecked + + static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon, + unsigned int bytes) +@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount; + /* Various Debug counters */ + GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ + #ifdef CONFIG_CIFS_STATS2 +-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ +-GLOBAL_EXTERN atomic_t totSmBufAllocCount; ++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */ ++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount; + #endif + GLOBAL_EXTERN atomic_t smBufAllocCount; + GLOBAL_EXTERN atomic_t midCount; +diff --git a/fs/cifs/link.c b/fs/cifs/link.c +index fc1e048..28b3441 100644 +--- a/fs/cifs/link.c ++++ b/fs/cifs/link.c +@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) + + void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie) + { +- char *p = nd_get_link(nd); ++ const char *p = nd_get_link(nd); + if (!IS_ERR(p)) + kfree(p); + } +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c +index 95b82e8..12a538d 100644 +--- a/fs/cifs/misc.c ++++ b/fs/cifs/misc.c +@@ -155,7 +155,7 @@ cifs_buf_get(void) + memset(ret_buf, 0, sizeof(struct smb_hdr) + 3); + atomic_inc(&bufAllocCount); + #ifdef CONFIG_CIFS_STATS2 +- atomic_inc(&totBufAllocCount); ++ atomic_inc_unchecked(&totBufAllocCount); + #endif /* CONFIG_CIFS_STATS2 */ + } + +@@ -190,7 +190,7 @@ cifs_small_buf_get(void) + /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ + atomic_inc(&smBufAllocCount); + #ifdef CONFIG_CIFS_STATS2 +- atomic_inc(&totSmBufAllocCount); ++ atomic_inc_unchecked(&totSmBufAllocCount); + #endif /* CONFIG_CIFS_STATS2 */ + + } +diff --git a/fs/coda/cache.c b/fs/coda/cache.c +index a5bf577..6d19845 100644 +--- a/fs/coda/cache.c ++++ b/fs/coda/cache.c +@@ -24,14 +24,14 @@ + #include <linux/coda_fs_i.h> + #include <linux/coda_cache.h> + +-static atomic_t permission_epoch = ATOMIC_INIT(0); ++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0); + + /* replace or extend an acl cache hit */ + void coda_cache_enter(struct inode *inode, int mask) + { + struct coda_inode_info *cii = ITOC(inode); + +- cii->c_cached_epoch = atomic_read(&permission_epoch); ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch); + if (cii->c_uid != current_fsuid()) { + cii->c_uid = current_fsuid(); + cii->c_cached_perm = mask; +@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask) + void 
coda_cache_clear_inode(struct inode *inode) + { + struct coda_inode_info *cii = ITOC(inode); +- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1; ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1; + } + + /* remove all acl caches */ + void coda_cache_clear_all(struct super_block *sb) + { +- atomic_inc(&permission_epoch); ++ atomic_inc_unchecked(&permission_epoch); + } + + +@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask) + + hit = (mask & cii->c_cached_perm) == mask && + cii->c_uid == current_fsuid() && +- cii->c_cached_epoch == atomic_read(&permission_epoch); ++ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch); + + return hit; + } +diff --git a/fs/compat.c b/fs/compat.c +index d1e2411..9a958d2 100644 +--- a/fs/compat.c ++++ b/fs/compat.c +@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _ + static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) + { + compat_ino_t ino = stat->ino; +- typeof(ubuf->st_uid) uid = 0; +- typeof(ubuf->st_gid) gid = 0; ++ typeof(((struct compat_stat *)0)->st_uid) uid = 0; ++ typeof(((struct compat_stat *)0)->st_gid) gid = 0; + int err; + + SET_UID(uid, stat->uid); +@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p) + + set_fs(KERNEL_DS); + /* The __user pointer cast is valid because of the set_fs() */ +- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64); ++ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64); + set_fs(oldfs); + /* truncating is ok because it's a user address */ + if (!ret) +@@ -830,6 +830,7 @@ struct compat_old_linux_dirent { + + struct compat_readdir_callback { + struct compat_old_linux_dirent __user *dirent; ++ struct file * file; + int result; + }; + +@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen, + buf->result = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + buf->result++; + dirent = buf->dirent; + if (!access_ok(VERIFY_WRITE, dirent, +@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, + + buf.result = 0; + buf.dirent = dirent; ++ buf.file = file; + + error = vfs_readdir(file, compat_fillonedir, &buf); + if (buf.result) +@@ -899,6 +905,7 @@ struct compat_linux_dirent { + struct compat_getdents_callback { + struct compat_linux_dirent __user *current_dir; + struct compat_linux_dirent __user *previous; ++ struct file * file; + int count; + int error; + }; +@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen, + buf->error = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, + buf.previous = NULL; + buf.count = count; + buf.error = 0; ++ buf.file = file; + + error = vfs_readdir(file, compat_filldir, &buf); + if (error >= 0) +@@ -987,6 +999,7 @@ out: + struct compat_getdents_callback64 { + struct linux_dirent64 __user *current_dir; + struct linux_dirent64 __user *previous; ++ struct file * file; + int count; + int error; + }; +@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + + if (dirent) { +@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, + buf.previous = NULL; + buf.count = count; + buf.error = 0; ++ buf.file = file; + + error = vfs_readdir(file, compat_filldir64, &buf); + if (error >= 0) + error = buf.error; + lastdirent = buf.previous; + if (lastdirent) { +- typeof(lastdirent->d_off) d_off = file->f_pos; ++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos; + if (__put_user_unaligned(d_off, &lastdirent->d_off)) + error = -EFAULT; + else +@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, + * verify all the pointers + */ + ret = -EINVAL; +- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0)) ++ if (nr_segs > UIO_MAXIOV) + goto out; + if (!file->f_op) + goto out; +@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename, + compat_uptr_t __user *envp, + struct pt_regs * regs) + { ++#ifdef CONFIG_GRKERNSEC ++ struct file *old_exec_file; ++ struct acl_subject_label *old_acl; ++ struct rlimit old_rlim[RLIM_NLIMITS]; ++#endif + struct linux_binprm *bprm; + struct file *file; + struct files_struct *displaced; + bool clear_in_exec; + int retval; ++ const struct cred *cred = current_cred(); ++ ++ /* ++ * We move the actual failure in case of RLIMIT_NPROC excess from ++ * set*uid() to execve() because too many poorly written programs ++ * don't check setuid() return code. Here we additionally recheck ++ * whether NPROC limit is still exceeded. ++ */ ++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->cred->user->processes), 1); ++ ++ if ((current->flags & PF_NPROC_EXCEEDED) && ++ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) { ++ retval = -EAGAIN; ++ goto out_ret; ++ } ++ ++ /* We're below the limit (still or again), so we don't want to make ++ * further execve() calls fail. 
*/ ++ current->flags &= ~PF_NPROC_EXCEEDED; + + retval = unshare_files(&displaced); + if (retval) +@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename, + if (IS_ERR(file)) + goto out_unmark; + ++ if (gr_ptrace_readexec(file, bprm->unsafe)) { ++ retval = -EPERM; ++ goto out_file; ++ } ++ + sched_exec(); + + bprm->file = file; + bprm->filename = filename; + bprm->interp = filename; + ++ if (gr_process_user_ban()) { ++ retval = -EPERM; ++ goto out_file; ++ } ++ ++ retval = -EACCES; ++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) ++ goto out_file; ++ + retval = bprm_mm_init(bprm); + if (retval) + goto out_file; +@@ -1515,24 +1571,63 @@ int compat_do_execve(char * filename, + if (retval < 0) + goto out; + ++#ifdef CONFIG_GRKERNSEC ++ old_acl = current->acl; ++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); ++ old_exec_file = current->exec_file; ++ get_file(file); ++ current->exec_file = file; ++#endif ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* limit suid stack to 8MB ++ we saved the old limits above and will restore them if this exec fails ++ */ ++ if ((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) ++ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; ++#endif ++ ++ if (!gr_tpe_allow(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ if (gr_check_crash_exec(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt, ++ bprm->unsafe); ++ if (retval < 0) ++ goto out_fail; ++ + retval = copy_strings_kernel(1, &bprm->filename, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + bprm->exec = bprm->p; + retval = compat_copy_strings(bprm->envc, envp, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + retval = compat_copy_strings(bprm->argc, argv, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; ++ ++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); ++ ++ gr_handle_exec_args_compat(bprm, argv); + + retval = search_binary_handler(bprm, regs); + if (retval < 0) +- goto out; ++ goto out_fail; ++#ifdef CONFIG_GRKERNSEC ++ if (old_exec_file) ++ fput(old_exec_file); ++#endif + + /* execve succeeded */ ++ increment_exec_counter(); + current->fs->in_exec = 0; + current->in_execve = 0; + acct_update_integrals(current); +@@ -1541,6 +1636,14 @@ int compat_do_execve(char * filename, + put_files_struct(displaced); + return retval; + ++out_fail: ++#ifdef CONFIG_GRKERNSEC ++ current->acl = old_acl; ++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); ++ fput(current->exec_file); ++ current->exec_file = old_exec_file; ++#endif ++ + out: + if (bprm->mm) { + acct_arg_size(bprm, 0); +@@ -1711,6 +1814,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, + struct fdtable *fdt; + long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; + ++ pax_track_stack(); ++ + if (n < 0) + goto out_nofds; + +@@ -2151,7 +2256,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd, + oldfs = get_fs(); + set_fs(KERNEL_DS); + /* The __user pointer casts are valid because of the set_fs() */ +- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres); ++ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres); + set_fs(oldfs); + + if (err) +diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c +index 0adced2..bbb1b0d 100644 +--- a/fs/compat_binfmt_elf.c ++++ b/fs/compat_binfmt_elf.c +@@ -29,10 +29,12 @@ + #undef elfhdr + #undef elf_phdr + #undef elf_note ++#undef elf_dyn + #undef elf_addr_t + #define elfhdr elf32_hdr 
+ #define elf_phdr elf32_phdr + #define elf_note elf32_note ++#define elf_dyn Elf32_Dyn + #define elf_addr_t Elf32_Addr + + /* +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c +index d84e705..d8c364c 100644 +--- a/fs/compat_ioctl.c ++++ b/fs/compat_ioctl.c +@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned + up = (struct compat_video_spu_palette __user *) arg; + err = get_user(palp, &up->palette); + err |= get_user(length, &up->length); ++ if (err) ++ return -EFAULT; + + up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); + err = put_user(compat_ptr(palp), &up_native->palette); +@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg) + return -EFAULT; + if (__get_user(udata, &ss32->iomem_base)) + return -EFAULT; +- ss.iomem_base = compat_ptr(udata); ++ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata); + if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || + __get_user(ss.port_high, &ss32->port_high)) + return -EFAULT; +@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg) + copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) || + copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) || + copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) || +- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32))) ++ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32))) + return -EFAULT; + + return ioctl_preallocate(file, p); +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index 8e48b52..f01ed91 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir + } + for (p=q->next; p!= &parent_sd->s_children; p=p->next) { + struct configfs_dirent *next; +- const char * name; ++ const unsigned char * name; ++ char d_name[sizeof(next->s_dentry->d_iname)]; + int len; + + next = list_entry(p, struct configfs_dirent, +@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir + continue; + + name = configfs_get_name(next); +- len = strlen(name); ++ if (next->s_dentry && name == next->s_dentry->d_iname) { ++ len = next->s_dentry->d_name.len; ++ memcpy(d_name, name, len); ++ name = d_name; ++ } else ++ len = strlen(name); + if (next->s_dentry) + ino = next->s_dentry->d_inode->i_ino; + else +diff --git a/fs/dcache.c b/fs/dcache.c +index 44c0aea..a663f95 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock); + + static struct kmem_cache *dentry_cache __read_mostly; + +-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) +- + /* + * This is the single most critical data structure when it comes + * to the dcache: the hashtable for lookups. 
Somebody should try +@@ -1108,11 +1106,11 @@ struct dentry * d_alloc_root(struct inode * root_inode) + return res; + } + +-static inline struct hlist_head *d_hash(struct dentry *parent, +- unsigned long hash) ++static inline struct hlist_head *d_hash(const struct dentry *parent, ++ unsigned int hash) + { +- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; +- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS); ++ hash += (unsigned long) parent / L1_CACHE_BYTES; ++ hash = hash + (hash >> D_HASHBITS); + return dentry_hashtable + (hash & D_HASHMASK); + } + +@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages) + mempages -= reserve; + + names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, +- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL); + + dcache_init(); + inode_init(); +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index 39c6ee8..dcee0f1 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file); + struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) + { + return debugfs_create_file(name, ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT ++ S_IFDIR | S_IRWXU, ++#else + S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, ++#endif + parent, NULL, NULL); + } + EXPORT_SYMBOL_GPL(debugfs_create_dir); +diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c +index c010ecf..a8d8c59 100644 +--- a/fs/dlm/lockspace.c ++++ b/fs/dlm/lockspace.c +@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k) + kfree(ls); + } + +-static struct sysfs_ops dlm_attr_ops = { ++static const struct sysfs_ops dlm_attr_ops = { + .show = dlm_attr_show, + .store = dlm_attr_store, + }; +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c +index 7e164bb..62fa913 100644 +--- a/fs/ecryptfs/crypto.c ++++ b/fs/ecryptfs/crypto.c +@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page, + rc); + goto out; + } +- if (unlikely(ecryptfs_verbosity > 0)) { +- ecryptfs_printk(KERN_DEBUG, "Encrypting extent " +- "with iv:\n"); +- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes); +- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before " +- "encryption:\n"); +- ecryptfs_dump_hex((char *) +- (page_address(page) +- + (extent_offset * crypt_stat->extent_size)), +- 8); +- } + rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0, + page, (extent_offset + * crypt_stat->extent_size), +@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page, + goto out; + } + rc = 0; +- if (unlikely(ecryptfs_verbosity > 0)) { +- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; " +- "rc = [%d]\n", (extent_base + extent_offset), +- rc); +- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after " +- "encryption:\n"); +- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8); +- } + out: + return rc; + } +@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page, + rc); + goto out; + } +- if (unlikely(ecryptfs_verbosity > 0)) { +- ecryptfs_printk(KERN_DEBUG, "Decrypting extent " +- "with iv:\n"); +- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes); +- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before " +- "decryption:\n"); +- ecryptfs_dump_hex((char *) +- (page_address(enc_extent_page) +- + (extent_offset * crypt_stat->extent_size)), +- 8); +- } + rc = ecryptfs_decrypt_page_offset(crypt_stat, page, + (extent_offset + * crypt_stat->extent_size), +@@ -569,16 +539,6 @@ static int 
ecryptfs_decrypt_extent(struct page *page, + goto out; + } + rc = 0; +- if (unlikely(ecryptfs_verbosity > 0)) { +- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; " +- "rc = [%d]\n", (extent_base + extent_offset), +- rc); +- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after " +- "decryption:\n"); +- ecryptfs_dump_hex((char *)(page_address(page) +- + (extent_offset +- * crypt_stat->extent_size)), 8); +- } + out: + return rc; + } +diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c +index 502b09f..49129f4 100644 +--- a/fs/ecryptfs/file.c ++++ b/fs/ecryptfs/file.c +@@ -348,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = { + #ifdef CONFIG_COMPAT + .compat_ioctl = ecryptfs_compat_ioctl, + #endif +- .mmap = generic_file_mmap, + .open = ecryptfs_open, + .flush = ecryptfs_flush, + .release = ecryptfs_release, +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c +index 90a6087..fa05803 100644 +--- a/fs/ecryptfs/inode.c ++++ b/fs/ecryptfs/inode.c +@@ -647,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, + old_fs = get_fs(); + set_fs(get_ds()); + rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, +- (char __user *)lower_buf, ++ (char __force_user *)lower_buf, + lower_bufsiz); + set_fs(old_fs); + if (rc < 0) +@@ -693,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) + } + old_fs = get_fs(); + set_fs(get_ds()); +- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); ++ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len); + set_fs(old_fs); + if (rc < 0) + goto out_free; +diff --git a/fs/exec.c b/fs/exec.c +index 86fafc6..6a109b9 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -56,12 +56,28 @@ + #include <linux/fsnotify.h> + #include <linux/fs_struct.h> + #include <linux/pipe_fs_i.h> ++#include <linux/random.h> ++#include <linux/seq_file.h> ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#include <linux/kallsyms.h> ++#include <linux/kdebug.h> ++#endif + + #include <asm/uaccess.h> + #include <asm/mmu_context.h> + #include <asm/tlb.h> + #include "internal.h" + ++#ifndef CONFIG_PAX_HAVE_ACL_FLAGS ++void __weak pax_set_initial_flags(struct linux_binprm *bprm) {} ++#endif ++ ++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS ++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++EXPORT_SYMBOL(pax_set_initial_flags_func); ++#endif ++ + int core_uses_pid; + char core_pattern[CORENAME_MAX_SIZE] = "core"; + unsigned int core_pipe_limit; +@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + int write) + { + struct page *page; +- int ret; + +-#ifdef CONFIG_STACK_GROWSUP +- if (write) { +- ret = expand_stack_downwards(bprm->vma, pos); +- if (ret < 0) +- return NULL; +- } +-#endif +- ret = get_user_pages(current, bprm->mm, pos, +- 1, write, 1, &page, NULL); +- if (ret <= 0) ++ if (0 > expand_stack_downwards(bprm->vma, pos)) ++ return NULL; ++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL)) + return NULL; + + if (write) { +@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + if (size <= ARG_MAX) + return page; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ // only allow 512KB for argv+env on suid/sgid binaries ++ // to prevent easy ASLR exhaustion ++ if (((bprm->cred->euid != current_euid()) || ++ (bprm->cred->egid != current_egid())) && ++ (size > (512 * 1024))) { ++ put_page(page); ++ return NULL; ++ } ++#endif ++ + /* + * Limit to 1/4-th the stack size for the argv+env strings. 
+ * This ensures that: +@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) + vma->vm_end = STACK_TOP_MAX; + vma->vm_start = vma->vm_end - PAGE_SIZE; + vma->vm_flags = VM_STACK_FLAGS; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); +@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) + mm->stack_vm = mm->total_vm = 1; + up_write(&mm->mmap_sem); + bprm->p = vma->vm_end - sizeof(void *); ++ ++#ifdef CONFIG_PAX_RANDUSTACK ++ if (randomize_va_space) ++ bprm->p ^= random32() & ~PAGE_MASK; ++#endif ++ + return 0; + err: + up_write(&mm->mmap_sem); +@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm) + int r; + mm_segment_t oldfs = get_fs(); + set_fs(KERNEL_DS); +- r = copy_strings(argc, (char __user * __user *)argv, bprm); ++ r = copy_strings(argc, (__force char __user * __user *)argv, bprm); + set_fs(oldfs); + return r; + } +@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) + unsigned long new_end = old_end - shift; + struct mmu_gather *tlb; + +- BUG_ON(new_start > new_end); ++ if (new_start >= new_end || new_start < mmap_min_addr) ++ return -ENOMEM; + + /* + * ensure there are no vmas between where we want to go +@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) + if (vma != find_vma(mm, new_start)) + return -EFAULT; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ BUG_ON(pax_find_mirror_vma(vma)); ++#endif ++ + /* + * cover the whole range: [new_start, old_end) + */ +@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm, + stack_top = arch_align_stack(stack_top); + stack_top = PAGE_ALIGN(stack_top); + +- if (unlikely(stack_top < mmap_min_addr) || +- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) +- return -ENOMEM; +- + stack_shift = vma->vm_end - stack_top; + + bprm->p -= stack_shift; +@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm, + bprm->exec -= stack_shift; + + down_write(&mm->mmap_sem); ++ ++ /* Move stack pages down in memory. */ ++ if (stack_shift) { ++ ret = shift_arg_pages(vma, stack_shift); ++ if (ret) ++ goto out_unlock; ++ } ++ + vm_flags = VM_STACK_FLAGS; + + /* +@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm, + vm_flags &= ~VM_EXEC; + vm_flags |= mm->def_flags; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end, + vm_flags); + if (ret) + goto out_unlock; + BUG_ON(prev != vma); + +- /* Move stack pages down in memory. 
*/ +- if (stack_shift) { +- ret = shift_arg_pages(vma, stack_shift); +- if (ret) +- goto out_unlock; +- } +- + stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE; + stack_size = vma->vm_end - vma->vm_start; + /* +@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- result = vfs_read(file, (void __user *)addr, count, &pos); ++ result = vfs_read(file, (void __force_user *)addr, count, &pos); + set_fs(old_fs); + return result; + } +@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) + perf_event_comm(tsk); + } + ++static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len) ++{ ++ int i, ch; ++ ++ /* Copies the binary name from after last slash */ ++ for (i = 0; (ch = *(fn++)) != '\0';) { ++ if (ch == '/') ++ i = 0; /* overwrite what we wrote */ ++ else ++ if (i < len - 1) ++ tcomm[i++] = ch; ++ } ++ tcomm[i] = '\0'; ++} ++ + int flush_old_exec(struct linux_binprm * bprm) + { + int retval; +@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm) + + set_mm_exe_file(bprm->mm, bprm->file); + ++ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm)); + /* + * Release all of the old mmap stuff + */ +@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec); + + void setup_new_exec(struct linux_binprm * bprm) + { +- int i, ch; +- char * name; +- char tcomm[sizeof(current->comm)]; +- + arch_pick_mmap_layout(current->mm); + + /* This is the point of no return */ +@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm) + else + set_dumpable(current->mm, suid_dumpable); + +- name = bprm->filename; +- +- /* Copies the binary name from after last slash */ +- for (i=0; (ch = *(name++)) != '\0';) { +- if (ch == '/') +- i = 0; /* overwrite what we wrote */ +- else +- if (i < (sizeof(tcomm) - 1)) +- tcomm[i++] = ch; +- } +- tcomm[i] = '\0'; +- set_task_comm(current, tcomm); ++ set_task_comm(current, bprm->tcomm); + + /* Set the new mm task size. We have to do that late because it may + * depend on TIF_32BIT which is only updated in flush_thread() on +@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) + } + rcu_read_unlock(); + +- if (p->fs->users > n_fs) { ++ if (atomic_read(&p->fs->users) > n_fs) { + bprm->unsafe |= LSM_UNSAFE_SHARE; + } else { + res = -EAGAIN; +@@ -1339,6 +1384,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) + + EXPORT_SYMBOL(search_binary_handler); + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++DEFINE_PER_CPU(u64, exec_counter); ++static int __init init_exec_counters(void) ++{ ++ unsigned int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ per_cpu(exec_counter, cpu) = (u64)cpu; ++ } ++ ++ return 0; ++} ++early_initcall(init_exec_counters); ++#endif ++ + /* + * sys_execve() executes a new program. + */ +@@ -1347,11 +1407,35 @@ int do_execve(char * filename, + char __user *__user *envp, + struct pt_regs * regs) + { ++#ifdef CONFIG_GRKERNSEC ++ struct file *old_exec_file; ++ struct acl_subject_label *old_acl; ++ struct rlimit old_rlim[RLIM_NLIMITS]; ++#endif + struct linux_binprm *bprm; + struct file *file; + struct files_struct *displaced; + bool clear_in_exec; + int retval; ++ const struct cred *cred = current_cred(); ++ ++ /* ++ * We move the actual failure in case of RLIMIT_NPROC excess from ++ * set*uid() to execve() because too many poorly written programs ++ * don't check setuid() return code. 
Here we additionally recheck ++ * whether NPROC limit is still exceeded. ++ */ ++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1); ++ ++ if ((current->flags & PF_NPROC_EXCEEDED) && ++ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) { ++ retval = -EAGAIN; ++ goto out_ret; ++ } ++ ++ /* We're below the limit (still or again), so we don't want to make ++ * further execve() calls fail. */ ++ current->flags &= ~PF_NPROC_EXCEEDED; + + retval = unshare_files(&displaced); + if (retval) +@@ -1377,12 +1461,27 @@ int do_execve(char * filename, + if (IS_ERR(file)) + goto out_unmark; + ++ if (gr_ptrace_readexec(file, bprm->unsafe)) { ++ retval = -EPERM; ++ goto out_file; ++ } ++ + sched_exec(); + + bprm->file = file; + bprm->filename = filename; + bprm->interp = filename; + ++ if (gr_process_user_ban()) { ++ retval = -EPERM; ++ goto out_file; ++ } ++ ++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { ++ retval = -EACCES; ++ goto out_file; ++ } ++ + retval = bprm_mm_init(bprm); + if (retval) + goto out_file; +@@ -1399,25 +1498,66 @@ int do_execve(char * filename, + if (retval < 0) + goto out; + ++#ifdef CONFIG_GRKERNSEC ++ old_acl = current->acl; ++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); ++ old_exec_file = current->exec_file; ++ get_file(file); ++ current->exec_file = file; ++#endif ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* limit suid stack to 8MB ++ we saved the old limits above and will restore them if this exec fails ++ */ ++ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) && ++ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024))) ++ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; ++#endif ++ ++ if (!gr_tpe_allow(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ if (gr_check_crash_exec(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt, ++ bprm->unsafe); ++ if (retval < 0) ++ goto out_fail; ++ + retval = copy_strings_kernel(1, &bprm->filename, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + bprm->exec = bprm->p; + retval = copy_strings(bprm->envc, envp, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + retval = copy_strings(bprm->argc, argv, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; ++ ++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); ++ ++ gr_handle_exec_args(bprm, (const char __user *const __user *)argv); + + current->flags &= ~PF_KTHREAD; + retval = search_binary_handler(bprm,regs); + if (retval < 0) +- goto out; ++ goto out_fail; ++#ifdef CONFIG_GRKERNSEC ++ if (old_exec_file) ++ fput(old_exec_file); ++#endif + + /* execve succeeded */ ++ ++ increment_exec_counter(); + current->fs->in_exec = 0; + current->in_execve = 0; + acct_update_integrals(current); +@@ -1426,6 +1566,14 @@ int do_execve(char * filename, + put_files_struct(displaced); + return retval; + ++out_fail: ++#ifdef CONFIG_GRKERNSEC ++ current->acl = old_acl; ++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); ++ fput(current->exec_file); ++ current->exec_file = old_exec_file; ++#endif ++ + out: + if (bprm->mm) { + acct_arg_size(bprm, 0); +@@ -1591,6 +1739,229 @@ out: + return ispipe; + } + ++int pax_check_flags(unsigned long *flags) ++{ ++ int retval = 0; ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC) ++ if (*flags & MF_PAX_SEGMEXEC) ++ { ++ *flags &= ~MF_PAX_SEGMEXEC; ++ retval = -EINVAL; ++ } ++#endif ++ ++ if ((*flags & 
MF_PAX_PAGEEXEC) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ && (*flags & MF_PAX_SEGMEXEC) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_PAGEEXEC; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_MPROTECT) ++ ++#ifdef CONFIG_PAX_MPROTECT ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_MPROTECT; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_EMUTRAMP) ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_EMUTRAMP; ++ retval = -EINVAL; ++ } ++ ++ return retval; ++} ++ ++EXPORT_SYMBOL(pax_check_flags); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp) ++{ ++ struct task_struct *tsk = current; ++ struct mm_struct *mm = current->mm; ++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL); ++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL); ++ char *path_exec = NULL; ++ char *path_fault = NULL; ++ unsigned long start = 0UL, end = 0UL, offset = 0UL; ++ ++ if (buffer_exec && buffer_fault) { ++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL; ++ ++ down_read(&mm->mmap_sem); ++ vma = mm->mmap; ++ while (vma && (!vma_exec || !vma_fault)) { ++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) ++ vma_exec = vma; ++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end) ++ vma_fault = vma; ++ vma = vma->vm_next; ++ } ++ if (vma_exec) { ++ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE); ++ if (IS_ERR(path_exec)) ++ path_exec = "<path too long>"; ++ else { ++ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\"); ++ if (path_exec) { ++ *path_exec = 0; ++ path_exec = buffer_exec; ++ } else ++ path_exec = "<path too long>"; ++ } ++ } ++ if (vma_fault) { ++ start = vma_fault->vm_start; ++ end = vma_fault->vm_end; ++ offset = vma_fault->vm_pgoff << PAGE_SHIFT; ++ if (vma_fault->vm_file) { ++ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE); ++ if (IS_ERR(path_fault)) ++ path_fault = "<path too long>"; ++ else { ++ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\"); ++ if (path_fault) { ++ *path_fault = 0; ++ path_fault = buffer_fault; ++ } else ++ path_fault = "<path too long>"; ++ } ++ } else ++ path_fault = "<anonymous mapping>"; ++ } ++ up_read(&mm->mmap_sem); ++ } ++ if (tsk->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset); ++ else ++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); ++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, " ++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk), ++ task_uid(tsk), task_euid(tsk), pc, sp); ++ free_page((unsigned long)buffer_exec); ++ free_page((unsigned long)buffer_fault); ++ pax_report_insns(regs, pc, sp); ++ do_coredump(SIGKILL, SIGKILL, regs); ++} ++#endif ++ ++#ifdef CONFIG_PAX_REFCOUNT ++void pax_report_refcount_overflow(struct pt_regs *regs) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", ++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ else ++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", ++ current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ print_symbol(KERN_ERR "PAX: 
refcount overflow occurred at: %s\n", instruction_pointer(regs)); ++ show_regs(regs); ++ force_sig_specific(SIGKILL, current); ++} ++#endif ++ ++#ifdef CONFIG_PAX_USERCOPY ++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */ ++int object_is_on_stack(const void *obj, unsigned long len) ++{ ++ const void * const stack = task_stack_page(current); ++ const void * const stackend = stack + THREAD_SIZE; ++ ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++ const void *frame = NULL; ++ const void *oldframe; ++#endif ++ ++ if (obj + len < obj) ++ return -1; ++ ++ if (obj + len <= stack || stackend <= obj) ++ return 0; ++ ++ if (obj < stack || stackend < obj + len) ++ return -1; ++ ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++ oldframe = __builtin_frame_address(1); ++ if (oldframe) ++ frame = __builtin_frame_address(2); ++ /* ++ low ----------------------------------------------> high ++ [saved bp][saved ip][args][local vars][saved bp][saved ip] ++ ^----------------^ ++ allow copies only within here ++ */ ++ while (stack <= frame && frame < stackend) { ++ /* if obj + len extends past the last frame, this ++ check won't pass and the next frame will be 0, ++ causing us to bail out and correctly report ++ the copy as invalid ++ */ ++ if (obj + len <= frame) ++ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1; ++ oldframe = frame; ++ frame = *(const void * const *)frame; ++ } ++ return -1; ++#else ++ return 1; ++#endif ++} ++ ++__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", ++ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len); ++ else ++ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", ++ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? 
: "unknown", len); ++ ++ dump_stack(); ++ gr_handle_kernel_exploit(); ++ do_group_exit(SIGKILL); ++} ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++void pax_track_stack(void) ++{ ++ unsigned long sp = (unsigned long)&sp; ++ if (sp < current_thread_info()->lowest_stack && ++ sp > (unsigned long)task_stack_page(current)) ++ current_thread_info()->lowest_stack = sp; ++} ++EXPORT_SYMBOL(pax_track_stack); ++#endif ++ ++#ifdef CONFIG_PAX_SIZE_OVERFLOW ++void report_size_overflow(const char *file, unsigned int line, const char *func) ++{ ++ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line); ++ dump_stack(); ++ do_group_exit(SIGKILL); ++} ++EXPORT_SYMBOL(report_size_overflow); ++#endif ++ + static int zap_process(struct task_struct *start) + { + struct task_struct *t; +@@ -1793,17 +2164,17 @@ static void wait_for_dump_helpers(struct file *file) + pipe = file->f_path.dentry->d_inode->i_pipe; + + pipe_lock(pipe); +- pipe->readers++; +- pipe->writers--; ++ atomic_inc(&pipe->readers); ++ atomic_dec(&pipe->writers); + +- while ((pipe->readers > 1) && (!signal_pending(current))) { ++ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) { + wake_up_interruptible_sync(&pipe->wait); + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + pipe_wait(pipe); + } + +- pipe->readers--; +- pipe->writers++; ++ atomic_dec(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe_unlock(pipe); + + } +@@ -1826,10 +2197,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) + char **helper_argv = NULL; + int helper_argc = 0; + int dump_count = 0; +- static atomic_t core_dump_count = ATOMIC_INIT(0); ++ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0); + + audit_core_dumps(signr); + ++ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL) ++ gr_handle_brute_attach(current, mm->flags); ++ + binfmt = mm->binfmt; + if (!binfmt || !binfmt->core_dump) + goto fail; +@@ -1874,6 +2248,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) + */ + clear_thread_flag(TIF_SIGPENDING); + ++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1); ++ + /* + * lock_kernel() because format_corename() is controlled by sysctl, which + * uses lock_kernel() +@@ -1908,7 +2284,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) + goto fail_unlock; + } + +- dump_count = atomic_inc_return(&core_dump_count); ++ dump_count = atomic_inc_return_unchecked(&core_dump_count); + if (core_pipe_limit && (core_pipe_limit < dump_count)) { + printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", + task_tgid_vnr(current), current->comm); +@@ -1972,7 +2348,7 @@ close_fail: + filp_close(file, NULL); + fail_dropcount: + if (dump_count) +- atomic_dec(&core_dump_count); ++ atomic_dec_unchecked(&core_dump_count); + fail_unlock: + if (helper_argv) + argv_free(helper_argv); +diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c +index 7f8d2e5..a1abdbb 100644 +--- a/fs/ext2/balloc.c ++++ b/fs/ext2/balloc.c +@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi) + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) && + sbi->s_resuid != current_fsuid() && + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { + return 0; +diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c 
+index 27967f9..9f2a5fb 100644 +--- a/fs/ext3/balloc.c ++++ b/fs/ext3/balloc.c +@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi) + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) && + sbi->s_resuid != current_fsuid() && + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { + return 0; +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index e85b63c..80398e6 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) + /* Hm, nope. Are (enough) root reserved blocks available? */ + if (sbi->s_resuid == current_fsuid() || + ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || +- capable(CAP_SYS_RESOURCE)) { ++ capable_nolog(CAP_SYS_RESOURCE)) { + if (free_blocks >= (nblocks + dirty_blocks)) + return 1; + } +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 67c46ed..1f237e5 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -1077,19 +1077,19 @@ struct ext4_sb_info { + + /* stats for buddy allocator */ + spinlock_t s_mb_pa_lock; +- atomic_t s_bal_reqs; /* number of reqs with len > 1 */ +- atomic_t s_bal_success; /* we found long enough chunks */ +- atomic_t s_bal_allocated; /* in blocks */ +- atomic_t s_bal_ex_scanned; /* total extents scanned */ +- atomic_t s_bal_goals; /* goal hits */ +- atomic_t s_bal_breaks; /* too long searches */ +- atomic_t s_bal_2orders; /* 2^order hits */ ++ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */ ++ atomic_unchecked_t s_bal_success; /* we found long enough chunks */ ++ atomic_unchecked_t s_bal_allocated; /* in blocks */ ++ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */ ++ atomic_unchecked_t s_bal_goals; /* goal hits */ ++ atomic_unchecked_t s_bal_breaks; /* too long searches */ ++ atomic_unchecked_t s_bal_2orders; /* 2^order hits */ + spinlock_t s_bal_lock; + unsigned long s_mb_buddies_generated; + unsigned long long s_mb_generation_time; +- atomic_t s_mb_lost_chunks; +- atomic_t s_mb_preallocated; +- atomic_t s_mb_discarded; ++ atomic_unchecked_t s_mb_lost_chunks; ++ atomic_unchecked_t s_mb_preallocated; ++ atomic_unchecked_t s_mb_discarded; + atomic_t s_lock_busy; + + /* locality groups */ +diff --git a/fs/ext4/file.c b/fs/ext4/file.c +index 2a60541..7439d61 100644 +--- a/fs/ext4/file.c ++++ b/fs/ext4/file.c +@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp) + cp = d_path(&path, buf, sizeof(buf)); + path_put(&path); + if (!IS_ERR(cp)) { +- memcpy(sbi->s_es->s_last_mounted, cp, +- sizeof(sbi->s_es->s_last_mounted)); ++ strlcpy(sbi->s_es->s_last_mounted, cp, ++ sizeof(sbi->s_es->s_last_mounted)); + sb->s_dirt = 1; + } + } +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 42bac1b..0aab9d8 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, + BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); + + if (EXT4_SB(sb)->s_mb_stats) +- atomic_inc(&EXT4_SB(sb)->s_bal_2orders); ++ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders); + + break; + } +@@ -2131,7 +2131,7 @@ repeat: + ac->ac_status = AC_STATUS_CONTINUE; + ac->ac_flags |= EXT4_MB_HINT_FIRST; + cr = 3; +- atomic_inc(&sbi->s_mb_lost_chunks); ++ atomic_inc_unchecked(&sbi->s_mb_lost_chunks); + goto 
repeat; + } + } +@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) + ext4_grpblk_t counters[16]; + } sg; + ++ pax_track_stack(); ++ + group--; + if (group == 0) + seq_printf(seq, "#%-5s: %-5s %-5s %-5s " +@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb) + if (sbi->s_mb_stats) { + printk(KERN_INFO + "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n", +- atomic_read(&sbi->s_bal_allocated), +- atomic_read(&sbi->s_bal_reqs), +- atomic_read(&sbi->s_bal_success)); ++ atomic_read_unchecked(&sbi->s_bal_allocated), ++ atomic_read_unchecked(&sbi->s_bal_reqs), ++ atomic_read_unchecked(&sbi->s_bal_success)); + printk(KERN_INFO + "EXT4-fs: mballoc: %u extents scanned, %u goal hits, " + "%u 2^N hits, %u breaks, %u lost\n", +- atomic_read(&sbi->s_bal_ex_scanned), +- atomic_read(&sbi->s_bal_goals), +- atomic_read(&sbi->s_bal_2orders), +- atomic_read(&sbi->s_bal_breaks), +- atomic_read(&sbi->s_mb_lost_chunks)); ++ atomic_read_unchecked(&sbi->s_bal_ex_scanned), ++ atomic_read_unchecked(&sbi->s_bal_goals), ++ atomic_read_unchecked(&sbi->s_bal_2orders), ++ atomic_read_unchecked(&sbi->s_bal_breaks), ++ atomic_read_unchecked(&sbi->s_mb_lost_chunks)); + printk(KERN_INFO + "EXT4-fs: mballoc: %lu generated and it took %Lu\n", + sbi->s_mb_buddies_generated++, + sbi->s_mb_generation_time); + printk(KERN_INFO + "EXT4-fs: mballoc: %u preallocated, %u discarded\n", +- atomic_read(&sbi->s_mb_preallocated), +- atomic_read(&sbi->s_mb_discarded)); ++ atomic_read_unchecked(&sbi->s_mb_preallocated), ++ atomic_read_unchecked(&sbi->s_mb_discarded)); + } + + free_percpu(sbi->s_locality_groups); +@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); + + if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { +- atomic_inc(&sbi->s_bal_reqs); +- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); ++ atomic_inc_unchecked(&sbi->s_bal_reqs); ++ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); + if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len) +- atomic_inc(&sbi->s_bal_success); +- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); ++ atomic_inc_unchecked(&sbi->s_bal_success); ++ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned); + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) +- atomic_inc(&sbi->s_bal_goals); ++ atomic_inc_unchecked(&sbi->s_bal_goals); + if (ac->ac_found > sbi->s_mb_max_to_scan) +- atomic_inc(&sbi->s_bal_breaks); ++ atomic_inc_unchecked(&sbi->s_bal_breaks); + } + + if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) +@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) + trace_ext4_mb_new_inode_pa(ac, pa); + + ext4_mb_use_inode_pa(ac, pa); +- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); ++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); + + ei = EXT4_I(ac->ac_inode); + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); +@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) + trace_ext4_mb_new_group_pa(ac, pa); + + ext4_mb_use_group_pa(ac, pa); +- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); ++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); + + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); + lg = ac->ac_lg; +@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, + * from the bitmap and continue. 
+ */ + } +- atomic_add(free, &sbi->s_mb_discarded); ++ atomic_add_unchecked(free, &sbi->s_mb_discarded); + + return err; + } +@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, + ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); + BUG_ON(group != e4b->bd_group && pa->pa_len != 0); + mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); +- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); ++ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); + + if (ac) { + ac->ac_sb = sb; +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index f1e7077..edd86b2 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj) + } + + +-static struct sysfs_ops ext4_attr_ops = { ++static const struct sysfs_ops ext4_attr_ops = { + .show = ext4_attr_show, + .store = ext4_attr_store, + }; +diff --git a/fs/fcntl.c b/fs/fcntl.c +index 97e01dc..e9aab2d 100644 +--- a/fs/fcntl.c ++++ b/fs/fcntl.c +@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, + if (err) + return err; + ++ if (gr_handle_chroot_fowner(pid, type)) ++ return -ENOENT; ++ if (gr_check_protected_task_fowner(pid, type)) ++ return -EACCES; ++ + f_modown(filp, pid, type, force); + return 0; + } +@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp) + + static int f_setown_ex(struct file *filp, unsigned long arg) + { +- struct f_owner_ex * __user owner_p = (void * __user)arg; ++ struct f_owner_ex __user *owner_p = (void __user *)arg; + struct f_owner_ex owner; + struct pid *pid; + int type; +@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg) + + static int f_getown_ex(struct file *filp, unsigned long arg) + { +- struct f_owner_ex * __user owner_p = (void * __user)arg; ++ struct f_owner_ex __user *owner_p = (void __user *)arg; + struct f_owner_ex owner; + int ret = 0; + +@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, + switch (cmd) { + case F_DUPFD: + case F_DUPFD_CLOEXEC: ++ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0); + if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur) + break; + err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0); +diff --git a/fs/fifo.c b/fs/fifo.c +index f8f97b8..b1f2259 100644 +--- a/fs/fifo.c ++++ b/fs/fifo.c +@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp) + */ + filp->f_op = &read_pipefifo_fops; + pipe->r_counter++; +- if (pipe->readers++ == 0) ++ if (atomic_inc_return(&pipe->readers) == 1) + wake_up_partner(inode); + +- if (!pipe->writers) { ++ if (!atomic_read(&pipe->writers)) { + if ((filp->f_flags & O_NONBLOCK)) { + /* suppress POLLHUP until we have + * seen a writer */ +@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp) + * errno=ENXIO when there is no process reading the FIFO. 
+ */ + ret = -ENXIO; +- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers) ++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers)) + goto err; + + filp->f_op = &write_pipefifo_fops; + pipe->w_counter++; +- if (!pipe->writers++) ++ if (atomic_inc_return(&pipe->writers) == 1) + wake_up_partner(inode); + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + wait_for_partner(inode, &pipe->r_counter); + if (signal_pending(current)) + goto err_wr; +@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp) + */ + filp->f_op = &rdwr_pipefifo_fops; + +- pipe->readers++; +- pipe->writers++; ++ atomic_inc(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe->r_counter++; + pipe->w_counter++; +- if (pipe->readers == 1 || pipe->writers == 1) ++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1) + wake_up_partner(inode); + break; + +@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp) + return 0; + + err_rd: +- if (!--pipe->readers) ++ if (atomic_dec_and_test(&pipe->readers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; + + err_wr: +- if (!--pipe->writers) ++ if (atomic_dec_and_test(&pipe->writers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; + + err: +- if (!pipe->readers && !pipe->writers) ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) + free_pipe_info(inode); + + err_nocleanup: +diff --git a/fs/file.c b/fs/file.c +index 87e1290..a930cc4 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -14,6 +14,7 @@ + #include <linux/slab.h> + #include <linux/vmalloc.h> + #include <linux/file.h> ++#include <linux/security.h> + #include <linux/fdtable.h> + #include <linux/bitops.h> + #include <linux/interrupt.h> +@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr) + * N.B. For clone tasks sharing a files structure, this test + * will limit the total number of files that can be opened. + */ ++ ++ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0); + if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur) + return -EMFILE; + +diff --git a/fs/filesystems.c b/fs/filesystems.c +index a24c58e..53f91ee 100644 +--- a/fs/filesystems.c ++++ b/fs/filesystems.c +@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name) + int len = dot ? dot - name : strlen(name); + + fs = __get_fs_type(name, len); ++ ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0)) ++#else + if (!fs && (request_module("%.*s", len, name) == 0)) ++#endif + fs = __get_fs_type(name, len); + + if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { +diff --git a/fs/fs_struct.c b/fs/fs_struct.c +index eee0590..1181166 100644 +--- a/fs/fs_struct.c ++++ b/fs/fs_struct.c +@@ -4,6 +4,7 @@ + #include <linux/path.h> + #include <linux/slab.h> + #include <linux/fs_struct.h> ++#include <linux/grsecurity.h> + + /* + * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. 
+@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path) + old_root = fs->root; + fs->root = *path; + path_get(path); ++ gr_set_chroot_entries(current, path); + write_unlock(&fs->lock); + if (old_root.dentry) + path_put(&old_root); +@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) + && fs->root.mnt == old_root->mnt) { + path_get(new_root); + fs->root = *new_root; ++ gr_set_chroot_entries(p, new_root); + count++; + } + if (fs->pwd.dentry == old_root->dentry +@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk) + task_lock(tsk); + write_lock(&fs->lock); + tsk->fs = NULL; +- kill = !--fs->users; ++ gr_clear_chroot_entries(tsk); ++ kill = !atomic_dec_return(&fs->users); + write_unlock(&fs->lock); + task_unlock(tsk); + if (kill) +@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) + struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); + /* We don't need to lock fs - think why ;-) */ + if (fs) { +- fs->users = 1; ++ atomic_set(&fs->users, 1); + fs->in_exec = 0; + rwlock_init(&fs->lock); + fs->umask = old->umask; +@@ -127,8 +131,9 @@ int unshare_fs_struct(void) + + task_lock(current); + write_lock(&fs->lock); +- kill = !--fs->users; ++ kill = !atomic_dec_return(&fs->users); + current->fs = new_fs; ++ gr_set_chroot_entries(current, &new_fs->root); + write_unlock(&fs->lock); + task_unlock(current); + +@@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct); + + int current_umask(void) + { +- return current->fs->umask; ++ return current->fs->umask | gr_acl_umask(); + } + EXPORT_SYMBOL(current_umask); + + /* to be mentioned only in INIT_TASK */ + struct fs_struct init_fs = { +- .users = 1, ++ .users = ATOMIC_INIT(1), + .lock = __RW_LOCK_UNLOCKED(init_fs.lock), + .umask = 0022, + }; +@@ -162,12 +167,13 @@ void daemonize_fs_struct(void) + task_lock(current); + + write_lock(&init_fs.lock); +- init_fs.users++; ++ atomic_inc(&init_fs.users); + write_unlock(&init_fs.lock); + + write_lock(&fs->lock); + current->fs = &init_fs; +- kill = !--fs->users; ++ gr_set_chroot_entries(current, &current->fs->root); ++ kill = !atomic_dec_return(&fs->users); + write_unlock(&fs->lock); + + task_unlock(current); +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c +index 9905350..02eaec4 100644 +--- a/fs/fscache/cookie.c ++++ b/fs/fscache/cookie.c +@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie( + parent ? 
(char *) parent->def->name : "<no-parent>", + def->name, netfs_data); + +- fscache_stat(&fscache_n_acquires); ++ fscache_stat_unchecked(&fscache_n_acquires); + + /* if there's no parent cookie, then we don't create one here either */ + if (!parent) { +- fscache_stat(&fscache_n_acquires_null); ++ fscache_stat_unchecked(&fscache_n_acquires_null); + _leave(" [no parent]"); + return NULL; + } +@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie( + /* allocate and initialise a cookie */ + cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL); + if (!cookie) { +- fscache_stat(&fscache_n_acquires_oom); ++ fscache_stat_unchecked(&fscache_n_acquires_oom); + _leave(" [ENOMEM]"); + return NULL; + } +@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie( + + switch (cookie->def->type) { + case FSCACHE_COOKIE_TYPE_INDEX: +- fscache_stat(&fscache_n_cookie_index); ++ fscache_stat_unchecked(&fscache_n_cookie_index); + break; + case FSCACHE_COOKIE_TYPE_DATAFILE: +- fscache_stat(&fscache_n_cookie_data); ++ fscache_stat_unchecked(&fscache_n_cookie_data); + break; + default: +- fscache_stat(&fscache_n_cookie_special); ++ fscache_stat_unchecked(&fscache_n_cookie_special); + break; + } + +@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie( + if (fscache_acquire_non_index_cookie(cookie) < 0) { + atomic_dec(&parent->n_children); + __fscache_cookie_put(cookie); +- fscache_stat(&fscache_n_acquires_nobufs); ++ fscache_stat_unchecked(&fscache_n_acquires_nobufs); + _leave(" = NULL"); + return NULL; + } + } + +- fscache_stat(&fscache_n_acquires_ok); ++ fscache_stat_unchecked(&fscache_n_acquires_ok); + _leave(" = %p", cookie); + return cookie; + } +@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) + cache = fscache_select_cache_for_object(cookie->parent); + if (!cache) { + up_read(&fscache_addremove_sem); +- fscache_stat(&fscache_n_acquires_no_cache); ++ fscache_stat_unchecked(&fscache_n_acquires_no_cache); + _leave(" = -ENOMEDIUM [no cache]"); + return -ENOMEDIUM; + } +@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache, + object = cache->ops->alloc_object(cache, cookie); + fscache_stat_d(&fscache_n_cop_alloc_object); + if (IS_ERR(object)) { +- fscache_stat(&fscache_n_object_no_alloc); ++ fscache_stat_unchecked(&fscache_n_object_no_alloc); + ret = PTR_ERR(object); + goto error; + } + +- fscache_stat(&fscache_n_object_alloc); ++ fscache_stat_unchecked(&fscache_n_object_alloc); + + object->debug_id = atomic_inc_return(&fscache_object_debug_id); + +@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie) + struct fscache_object *object; + struct hlist_node *_p; + +- fscache_stat(&fscache_n_updates); ++ fscache_stat_unchecked(&fscache_n_updates); + + if (!cookie) { +- fscache_stat(&fscache_n_updates_null); ++ fscache_stat_unchecked(&fscache_n_updates_null); + _leave(" [no cookie]"); + return; + } +@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) + struct fscache_object *object; + unsigned long event; + +- fscache_stat(&fscache_n_relinquishes); ++ fscache_stat_unchecked(&fscache_n_relinquishes); + if (retire) +- fscache_stat(&fscache_n_relinquishes_retire); ++ fscache_stat_unchecked(&fscache_n_relinquishes_retire); + + if (!cookie) { +- fscache_stat(&fscache_n_relinquishes_null); ++ fscache_stat_unchecked(&fscache_n_relinquishes_null); + _leave(" [no cookie]"); + return; + } +@@ -435,7 +435,7 @@ void 
__fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) + + /* wait for the cookie to finish being instantiated (or to fail) */ + if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) { +- fscache_stat(&fscache_n_relinquishes_waitcrt); ++ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt); + wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } +diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h +index edd7434..0725e66 100644 +--- a/fs/fscache/internal.h ++++ b/fs/fscache/internal.h +@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void); + extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; + extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; + +-extern atomic_t fscache_n_op_pend; +-extern atomic_t fscache_n_op_run; +-extern atomic_t fscache_n_op_enqueue; +-extern atomic_t fscache_n_op_deferred_release; +-extern atomic_t fscache_n_op_release; +-extern atomic_t fscache_n_op_gc; +-extern atomic_t fscache_n_op_cancelled; +-extern atomic_t fscache_n_op_rejected; ++extern atomic_unchecked_t fscache_n_op_pend; ++extern atomic_unchecked_t fscache_n_op_run; ++extern atomic_unchecked_t fscache_n_op_enqueue; ++extern atomic_unchecked_t fscache_n_op_deferred_release; ++extern atomic_unchecked_t fscache_n_op_release; ++extern atomic_unchecked_t fscache_n_op_gc; ++extern atomic_unchecked_t fscache_n_op_cancelled; ++extern atomic_unchecked_t fscache_n_op_rejected; + +-extern atomic_t fscache_n_attr_changed; +-extern atomic_t fscache_n_attr_changed_ok; +-extern atomic_t fscache_n_attr_changed_nobufs; +-extern atomic_t fscache_n_attr_changed_nomem; +-extern atomic_t fscache_n_attr_changed_calls; ++extern atomic_unchecked_t fscache_n_attr_changed; ++extern atomic_unchecked_t fscache_n_attr_changed_ok; ++extern atomic_unchecked_t fscache_n_attr_changed_nobufs; ++extern atomic_unchecked_t fscache_n_attr_changed_nomem; ++extern atomic_unchecked_t fscache_n_attr_changed_calls; + +-extern atomic_t fscache_n_allocs; +-extern atomic_t fscache_n_allocs_ok; +-extern atomic_t fscache_n_allocs_wait; +-extern atomic_t fscache_n_allocs_nobufs; +-extern atomic_t fscache_n_allocs_intr; +-extern atomic_t fscache_n_allocs_object_dead; +-extern atomic_t fscache_n_alloc_ops; +-extern atomic_t fscache_n_alloc_op_waits; ++extern atomic_unchecked_t fscache_n_allocs; ++extern atomic_unchecked_t fscache_n_allocs_ok; ++extern atomic_unchecked_t fscache_n_allocs_wait; ++extern atomic_unchecked_t fscache_n_allocs_nobufs; ++extern atomic_unchecked_t fscache_n_allocs_intr; ++extern atomic_unchecked_t fscache_n_allocs_object_dead; ++extern atomic_unchecked_t fscache_n_alloc_ops; ++extern atomic_unchecked_t fscache_n_alloc_op_waits; + +-extern atomic_t fscache_n_retrievals; +-extern atomic_t fscache_n_retrievals_ok; +-extern atomic_t fscache_n_retrievals_wait; +-extern atomic_t fscache_n_retrievals_nodata; +-extern atomic_t fscache_n_retrievals_nobufs; +-extern atomic_t fscache_n_retrievals_intr; +-extern atomic_t fscache_n_retrievals_nomem; +-extern atomic_t fscache_n_retrievals_object_dead; +-extern atomic_t fscache_n_retrieval_ops; +-extern atomic_t fscache_n_retrieval_op_waits; ++extern atomic_unchecked_t fscache_n_retrievals; ++extern atomic_unchecked_t fscache_n_retrievals_ok; ++extern atomic_unchecked_t fscache_n_retrievals_wait; ++extern atomic_unchecked_t fscache_n_retrievals_nodata; ++extern atomic_unchecked_t fscache_n_retrievals_nobufs; ++extern atomic_unchecked_t fscache_n_retrievals_intr; ++extern atomic_unchecked_t 
fscache_n_retrievals_nomem; ++extern atomic_unchecked_t fscache_n_retrievals_object_dead; ++extern atomic_unchecked_t fscache_n_retrieval_ops; ++extern atomic_unchecked_t fscache_n_retrieval_op_waits; + +-extern atomic_t fscache_n_stores; +-extern atomic_t fscache_n_stores_ok; +-extern atomic_t fscache_n_stores_again; +-extern atomic_t fscache_n_stores_nobufs; +-extern atomic_t fscache_n_stores_oom; +-extern atomic_t fscache_n_store_ops; +-extern atomic_t fscache_n_store_calls; +-extern atomic_t fscache_n_store_pages; +-extern atomic_t fscache_n_store_radix_deletes; +-extern atomic_t fscache_n_store_pages_over_limit; ++extern atomic_unchecked_t fscache_n_stores; ++extern atomic_unchecked_t fscache_n_stores_ok; ++extern atomic_unchecked_t fscache_n_stores_again; ++extern atomic_unchecked_t fscache_n_stores_nobufs; ++extern atomic_unchecked_t fscache_n_stores_oom; ++extern atomic_unchecked_t fscache_n_store_ops; ++extern atomic_unchecked_t fscache_n_store_calls; ++extern atomic_unchecked_t fscache_n_store_pages; ++extern atomic_unchecked_t fscache_n_store_radix_deletes; ++extern atomic_unchecked_t fscache_n_store_pages_over_limit; + +-extern atomic_t fscache_n_store_vmscan_not_storing; +-extern atomic_t fscache_n_store_vmscan_gone; +-extern atomic_t fscache_n_store_vmscan_busy; +-extern atomic_t fscache_n_store_vmscan_cancelled; ++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing; ++extern atomic_unchecked_t fscache_n_store_vmscan_gone; ++extern atomic_unchecked_t fscache_n_store_vmscan_busy; ++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled; + +-extern atomic_t fscache_n_marks; +-extern atomic_t fscache_n_uncaches; ++extern atomic_unchecked_t fscache_n_marks; ++extern atomic_unchecked_t fscache_n_uncaches; + +-extern atomic_t fscache_n_acquires; +-extern atomic_t fscache_n_acquires_null; +-extern atomic_t fscache_n_acquires_no_cache; +-extern atomic_t fscache_n_acquires_ok; +-extern atomic_t fscache_n_acquires_nobufs; +-extern atomic_t fscache_n_acquires_oom; ++extern atomic_unchecked_t fscache_n_acquires; ++extern atomic_unchecked_t fscache_n_acquires_null; ++extern atomic_unchecked_t fscache_n_acquires_no_cache; ++extern atomic_unchecked_t fscache_n_acquires_ok; ++extern atomic_unchecked_t fscache_n_acquires_nobufs; ++extern atomic_unchecked_t fscache_n_acquires_oom; + +-extern atomic_t fscache_n_updates; +-extern atomic_t fscache_n_updates_null; +-extern atomic_t fscache_n_updates_run; ++extern atomic_unchecked_t fscache_n_updates; ++extern atomic_unchecked_t fscache_n_updates_null; ++extern atomic_unchecked_t fscache_n_updates_run; + +-extern atomic_t fscache_n_relinquishes; +-extern atomic_t fscache_n_relinquishes_null; +-extern atomic_t fscache_n_relinquishes_waitcrt; +-extern atomic_t fscache_n_relinquishes_retire; ++extern atomic_unchecked_t fscache_n_relinquishes; ++extern atomic_unchecked_t fscache_n_relinquishes_null; ++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt; ++extern atomic_unchecked_t fscache_n_relinquishes_retire; + +-extern atomic_t fscache_n_cookie_index; +-extern atomic_t fscache_n_cookie_data; +-extern atomic_t fscache_n_cookie_special; ++extern atomic_unchecked_t fscache_n_cookie_index; ++extern atomic_unchecked_t fscache_n_cookie_data; ++extern atomic_unchecked_t fscache_n_cookie_special; + +-extern atomic_t fscache_n_object_alloc; +-extern atomic_t fscache_n_object_no_alloc; +-extern atomic_t fscache_n_object_lookups; +-extern atomic_t fscache_n_object_lookups_negative; +-extern atomic_t fscache_n_object_lookups_positive; +-extern 
atomic_t fscache_n_object_lookups_timed_out; +-extern atomic_t fscache_n_object_created; +-extern atomic_t fscache_n_object_avail; +-extern atomic_t fscache_n_object_dead; ++extern atomic_unchecked_t fscache_n_object_alloc; ++extern atomic_unchecked_t fscache_n_object_no_alloc; ++extern atomic_unchecked_t fscache_n_object_lookups; ++extern atomic_unchecked_t fscache_n_object_lookups_negative; ++extern atomic_unchecked_t fscache_n_object_lookups_positive; ++extern atomic_unchecked_t fscache_n_object_lookups_timed_out; ++extern atomic_unchecked_t fscache_n_object_created; ++extern atomic_unchecked_t fscache_n_object_avail; ++extern atomic_unchecked_t fscache_n_object_dead; + +-extern atomic_t fscache_n_checkaux_none; +-extern atomic_t fscache_n_checkaux_okay; +-extern atomic_t fscache_n_checkaux_update; +-extern atomic_t fscache_n_checkaux_obsolete; ++extern atomic_unchecked_t fscache_n_checkaux_none; ++extern atomic_unchecked_t fscache_n_checkaux_okay; ++extern atomic_unchecked_t fscache_n_checkaux_update; ++extern atomic_unchecked_t fscache_n_checkaux_obsolete; + + extern atomic_t fscache_n_cop_alloc_object; + extern atomic_t fscache_n_cop_lookup_object; +@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat) + atomic_inc(stat); + } + ++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat) ++{ ++ atomic_inc_unchecked(stat); ++} ++ + static inline void fscache_stat_d(atomic_t *stat) + { + atomic_dec(stat); +@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops; + + #define __fscache_stat(stat) (NULL) + #define fscache_stat(stat) do {} while (0) ++#define fscache_stat_unchecked(stat) do {} while (0) + #define fscache_stat_d(stat) do {} while (0) + #endif + +diff --git a/fs/fscache/object.c b/fs/fscache/object.c +index e513ac5..e888d34 100644 +--- a/fs/fscache/object.c ++++ b/fs/fscache/object.c +@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object) + /* update the object metadata on disk */ + case FSCACHE_OBJECT_UPDATING: + clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); +- fscache_stat(&fscache_n_updates_run); ++ fscache_stat_unchecked(&fscache_n_updates_run); + fscache_stat(&fscache_n_cop_update_object); + object->cache->ops->update_object(object); + fscache_stat_d(&fscache_n_cop_update_object); +@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object) + spin_lock(&object->lock); + object->state = FSCACHE_OBJECT_DEAD; + spin_unlock(&object->lock); +- fscache_stat(&fscache_n_object_dead); ++ fscache_stat_unchecked(&fscache_n_object_dead); + goto terminal_transit; + + /* handle the parent cache of this object being withdrawn from +@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object) + spin_lock(&object->lock); + object->state = FSCACHE_OBJECT_DEAD; + spin_unlock(&object->lock); +- fscache_stat(&fscache_n_object_dead); ++ fscache_stat_unchecked(&fscache_n_object_dead); + goto terminal_transit; + + /* complain about the object being woken up once it is +@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object) + parent->cookie->def->name, cookie->def->name, + object->cache->tag->name); + +- fscache_stat(&fscache_n_object_lookups); ++ fscache_stat_unchecked(&fscache_n_object_lookups); + fscache_stat(&fscache_n_cop_lookup_object); + ret = object->cache->ops->lookup_object(object); + fscache_stat_d(&fscache_n_cop_lookup_object); +@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object 
*object) + if (ret == -ETIMEDOUT) { + /* probably stuck behind another object, so move this one to + * the back of the queue */ +- fscache_stat(&fscache_n_object_lookups_timed_out); ++ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out); + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + } + +@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object) + + spin_lock(&object->lock); + if (object->state == FSCACHE_OBJECT_LOOKING_UP) { +- fscache_stat(&fscache_n_object_lookups_negative); ++ fscache_stat_unchecked(&fscache_n_object_lookups_negative); + + /* transit here to allow write requests to begin stacking up + * and read requests to begin returning ENODATA */ +@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object) + * result, in which case there may be data available */ + spin_lock(&object->lock); + if (object->state == FSCACHE_OBJECT_LOOKING_UP) { +- fscache_stat(&fscache_n_object_lookups_positive); ++ fscache_stat_unchecked(&fscache_n_object_lookups_positive); + + clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); + +@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object) + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + } else { + ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); +- fscache_stat(&fscache_n_object_created); ++ fscache_stat_unchecked(&fscache_n_object_created); + + object->state = FSCACHE_OBJECT_AVAILABLE; + spin_unlock(&object->lock); +@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object) + fscache_enqueue_dependents(object); + + fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); +- fscache_stat(&fscache_n_object_avail); ++ fscache_stat_unchecked(&fscache_n_object_avail); + + _leave(""); + } +@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + enum fscache_checkaux result; + + if (!object->cookie->def->check_aux) { +- fscache_stat(&fscache_n_checkaux_none); ++ fscache_stat_unchecked(&fscache_n_checkaux_none); + return FSCACHE_CHECKAUX_OKAY; + } + +@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + switch (result) { + /* entry okay as is */ + case FSCACHE_CHECKAUX_OKAY: +- fscache_stat(&fscache_n_checkaux_okay); ++ fscache_stat_unchecked(&fscache_n_checkaux_okay); + break; + + /* entry requires update */ + case FSCACHE_CHECKAUX_NEEDS_UPDATE: +- fscache_stat(&fscache_n_checkaux_update); ++ fscache_stat_unchecked(&fscache_n_checkaux_update); + break; + + /* entry requires deletion */ + case FSCACHE_CHECKAUX_OBSOLETE: +- fscache_stat(&fscache_n_checkaux_obsolete); ++ fscache_stat_unchecked(&fscache_n_checkaux_obsolete); + break; + + default: +diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c +index 313e79a..775240f 100644 +--- a/fs/fscache/operation.c ++++ b/fs/fscache/operation.c +@@ -16,7 +16,7 @@ + #include <linux/seq_file.h> + #include "internal.h" + +-atomic_t fscache_op_debug_id; ++atomic_unchecked_t fscache_op_debug_id; + EXPORT_SYMBOL(fscache_op_debug_id); + + /** +@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) + ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); + ASSERTCMP(atomic_read(&op->usage), >, 0); + +- fscache_stat(&fscache_n_op_enqueue); ++ fscache_stat_unchecked(&fscache_n_op_enqueue); + switch (op->flags & FSCACHE_OP_TYPE) { + case FSCACHE_OP_FAST: + _debug("queue fast"); +@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object, + 
wake_up_bit(&op->flags, FSCACHE_OP_WAITING); + if (op->processor) + fscache_enqueue_operation(op); +- fscache_stat(&fscache_n_op_run); ++ fscache_stat_unchecked(&fscache_n_op_run); + } + + /* +@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object, + if (object->n_ops > 0) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + } else if (!list_empty(&object->pending_ops)) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + fscache_start_operations(object); + } else { + ASSERTCMP(object->n_in_progress, ==, 0); +@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, + object->n_exclusive++; /* reads and writes must wait */ + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + ret = 0; + } else { + /* not allowed to submit ops in any other state */ +@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object, + if (object->n_exclusive > 0) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + } else if (!list_empty(&object->pending_ops)) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + fscache_start_operations(object); + } else { + ASSERTCMP(object->n_exclusive, ==, 0); +@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object, + object->n_ops++; + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + ret = 0; + } else if (object->state == FSCACHE_OBJECT_DYING || + object->state == FSCACHE_OBJECT_LC_DYING || + object->state == FSCACHE_OBJECT_WITHDRAWING) { +- fscache_stat(&fscache_n_op_rejected); ++ fscache_stat_unchecked(&fscache_n_op_rejected); + ret = -ENOBUFS; + } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { + fscache_report_unexpected_submission(object, op, ostate); +@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op) + + ret = -EBUSY; + if (!list_empty(&op->pend_link)) { +- fscache_stat(&fscache_n_op_cancelled); ++ fscache_stat_unchecked(&fscache_n_op_cancelled); + list_del_init(&op->pend_link); + object->n_ops--; + if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) +@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op) + if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) + BUG(); + +- fscache_stat(&fscache_n_op_release); ++ fscache_stat_unchecked(&fscache_n_op_release); + + if (op->release) { + op->release(op); +@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op) + * lock, and defer it otherwise */ + if (!spin_trylock(&object->lock)) { + _debug("defer put"); +- fscache_stat(&fscache_n_op_deferred_release); ++ fscache_stat_unchecked(&fscache_n_op_deferred_release); + + cache = object->cache; + spin_lock(&cache->op_gc_list_lock); +@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work) + + _debug("GC DEFERRED REL OBJ%x OP%x", + object->debug_id, op->debug_id); +- fscache_stat(&fscache_n_op_gc); ++ 
fscache_stat_unchecked(&fscache_n_op_gc); + + ASSERTCMP(atomic_read(&op->usage), ==, 0); + +diff --git a/fs/fscache/page.c b/fs/fscache/page.c +index c598ea4..6aac13e 100644 +--- a/fs/fscache/page.c ++++ b/fs/fscache/page.c +@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, + val = radix_tree_lookup(&cookie->stores, page->index); + if (!val) { + rcu_read_unlock(); +- fscache_stat(&fscache_n_store_vmscan_not_storing); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing); + __fscache_uncache_page(cookie, page); + return true; + } +@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, + spin_unlock(&cookie->stores_lock); + + if (xpage) { +- fscache_stat(&fscache_n_store_vmscan_cancelled); +- fscache_stat(&fscache_n_store_radix_deletes); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled); ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes); + ASSERTCMP(xpage, ==, page); + } else { +- fscache_stat(&fscache_n_store_vmscan_gone); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_gone); + } + + wake_up_bit(&cookie->flags, 0); +@@ -106,7 +106,7 @@ page_busy: + /* we might want to wait here, but that could deadlock the allocator as + * the slow-work threads writing to the cache may all end up sleeping + * on memory allocation */ +- fscache_stat(&fscache_n_store_vmscan_busy); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy); + return false; + } + EXPORT_SYMBOL(__fscache_maybe_release_page); +@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object, + FSCACHE_COOKIE_STORING_TAG); + if (!radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_PENDING_TAG)) { +- fscache_stat(&fscache_n_store_radix_deletes); ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes); + xpage = radix_tree_delete(&cookie->stores, page->index); + } + spin_unlock(&cookie->stores_lock); +@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op) + + _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); + +- fscache_stat(&fscache_n_attr_changed_calls); ++ fscache_stat_unchecked(&fscache_n_attr_changed_calls); + + if (fscache_object_is_active(object)) { + fscache_set_op_state(op, "CallFS"); +@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + +- fscache_stat(&fscache_n_attr_changed); ++ fscache_stat_unchecked(&fscache_n_attr_changed); + + op = kzalloc(sizeof(*op), GFP_KERNEL); + if (!op) { +- fscache_stat(&fscache_n_attr_changed_nomem); ++ fscache_stat_unchecked(&fscache_n_attr_changed_nomem); + _leave(" = -ENOMEM"); + return -ENOMEM; + } +@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + if (fscache_submit_exclusive_op(object, op) < 0) + goto nobufs; + spin_unlock(&cookie->lock); +- fscache_stat(&fscache_n_attr_changed_ok); ++ fscache_stat_unchecked(&fscache_n_attr_changed_ok); + fscache_put_operation(op); + _leave(" = 0"); + return 0; +@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + nobufs: + spin_unlock(&cookie->lock); + kfree(op); +- fscache_stat(&fscache_n_attr_changed_nobufs); ++ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs); + _leave(" = %d", -ENOBUFS); + return -ENOBUFS; + } +@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval( + /* allocate a retrieval operation and attempt to submit it */ + op = kzalloc(sizeof(*op), GFP_NOIO); + if (!op) { +- 
fscache_stat(&fscache_n_retrievals_nomem);
++		fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ 		return NULL;
+ 	}
+ 
+@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+ 		return 0;
+ 	}
+ 
+-	fscache_stat(&fscache_n_retrievals_wait);
++	fscache_stat_unchecked(&fscache_n_retrievals_wait);
+ 
+ 	jif = jiffies;
+ 	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+ 			fscache_wait_bit_interruptible,
+ 			TASK_INTERRUPTIBLE) != 0) {
+-		fscache_stat(&fscache_n_retrievals_intr);
++		fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ 		_leave(" = -ERESTARTSYS");
+ 		return -ERESTARTSYS;
+ 	}
+@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+  */
+ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ 						 struct fscache_retrieval *op,
+-						 atomic_t *stat_op_waits,
+-						 atomic_t *stat_object_dead)
++						 atomic_unchecked_t *stat_op_waits,
++						 atomic_unchecked_t *stat_object_dead)
+ {
+ 	int ret;
+ 
+@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ 		goto check_if_dead;
+ 
+ 	_debug(">>> WT");
+-	fscache_stat(stat_op_waits);
++	fscache_stat_unchecked(stat_op_waits);
+ 	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ 			fscache_wait_bit_interruptible,
+ 			TASK_INTERRUPTIBLE) < 0) {
+@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ 
+ check_if_dead:
+ 	if (unlikely(fscache_object_is_dead(object))) {
+-		fscache_stat(stat_object_dead);
++		fscache_stat_unchecked(stat_object_dead);
+ 		return -ENOBUFS;
+ 	}
+ 	return 0;
+@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ 
+ 	_enter("%p,%p,,,", cookie, page);
+ 
+-	fscache_stat(&fscache_n_retrievals);
++	fscache_stat_unchecked(&fscache_n_retrievals);
+ 
+ 	if (hlist_empty(&cookie->backing_objects))
+ 		goto nobufs;
+@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ 		goto nobufs_unlock;
+ 	spin_unlock(&cookie->lock);
+ 
+-	fscache_stat(&fscache_n_retrieval_ops);
++	fscache_stat_unchecked(&fscache_n_retrieval_ops);
+ 
+ 	/* pin the netfs read context in case we need to do the actual netfs
+ 	 * read because we've encountered a cache read failure */
+@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ 
+ error:
+ 	if (ret == -ENOMEM)
+-		fscache_stat(&fscache_n_retrievals_nomem);
++		fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ 	else if (ret == -ERESTARTSYS)
+-		fscache_stat(&fscache_n_retrievals_intr);
++		fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ 	else if (ret == -ENODATA)
+-		fscache_stat(&fscache_n_retrievals_nodata);
++		fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ 	else if (ret < 0)
+-		fscache_stat(&fscache_n_retrievals_nobufs);
++		fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ 	else
+-		fscache_stat(&fscache_n_retrievals_ok);
++		fscache_stat_unchecked(&fscache_n_retrievals_ok);
+ 
+ 	fscache_put_retrieval(op);
+ 	_leave(" = %d", ret);
+@@ -453,7 +453,7 @@ nobufs_unlock:
+ 	spin_unlock(&cookie->lock);
+ 	kfree(op);
+ nobufs:
+-	fscache_stat(&fscache_n_retrievals_nobufs);
++	fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ 	_leave(" = -ENOBUFS");
+ 	return -ENOBUFS;
+ }
+@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ 
+ 	_enter("%p,,%d,,,", cookie, *nr_pages);
+ 
+-	fscache_stat(&fscache_n_retrievals);
++	fscache_stat_unchecked(&fscache_n_retrievals);
+ 
+ 	if (hlist_empty(&cookie->backing_objects))
+ 		goto nobufs;
+@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ 		goto nobufs_unlock;
+ 	spin_unlock(&cookie->lock);
+ 
+-	fscache_stat(&fscache_n_retrieval_ops);
++	fscache_stat_unchecked(&fscache_n_retrieval_ops);
+ 
+ 	/* pin the netfs read context in case we need to do the actual netfs
+ 	 * read because we've encountered a cache read failure */
+@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ 
+ error:
+ 	if (ret == -ENOMEM)
+-		fscache_stat(&fscache_n_retrievals_nomem);
++		fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ 	else if (ret == -ERESTARTSYS)
+-		fscache_stat(&fscache_n_retrievals_intr);
++		fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ 	else if (ret == -ENODATA)
+-		fscache_stat(&fscache_n_retrievals_nodata);
++		fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ 	else if (ret < 0)
+-		fscache_stat(&fscache_n_retrievals_nobufs);
++		fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ 	else
+-		fscache_stat(&fscache_n_retrievals_ok);
++		fscache_stat_unchecked(&fscache_n_retrievals_ok);
+ 
+ 	fscache_put_retrieval(op);
+ 	_leave(" = %d", ret);
+@@ -570,7 +570,7 @@ nobufs_unlock:
+ 	spin_unlock(&cookie->lock);
+ 	kfree(op);
+ nobufs:
+-	fscache_stat(&fscache_n_retrievals_nobufs);
++	fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ 	_leave(" = -ENOBUFS");
+ 	return -ENOBUFS;
+ }
+@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+ 
+ 	_enter("%p,%p,,,", cookie, page);
+ 
+-	fscache_stat(&fscache_n_allocs);
++	fscache_stat_unchecked(&fscache_n_allocs);
+ 
+ 	if (hlist_empty(&cookie->backing_objects))
+ 		goto nobufs;
+@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+ 		goto nobufs_unlock;
+ 	spin_unlock(&cookie->lock);
+ 
+-	fscache_stat(&fscache_n_alloc_ops);
++	fscache_stat_unchecked(&fscache_n_alloc_ops);
+ 
+ 	ret = fscache_wait_for_retrieval_activation(
+ 		object, op,
+@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+ 
+ error:
+ 	if (ret == -ERESTARTSYS)
+-		fscache_stat(&fscache_n_allocs_intr);
++		fscache_stat_unchecked(&fscache_n_allocs_intr);
+ 	else if (ret < 0)
+-		fscache_stat(&fscache_n_allocs_nobufs);
++		fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ 	else
+-		fscache_stat(&fscache_n_allocs_ok);
++		fscache_stat_unchecked(&fscache_n_allocs_ok);
+ 
+ 	fscache_put_retrieval(op);
+ 	_leave(" = %d", ret);
+@@ -651,7 +651,7 @@ nobufs_unlock:
+ 	spin_unlock(&cookie->lock);
+ 	kfree(op);
+ nobufs:
+-	fscache_stat(&fscache_n_allocs_nobufs);
++	fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ 	_leave(" = -ENOBUFS");
+ 	return -ENOBUFS;
+ }
+@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+ 
+ 	spin_lock(&cookie->stores_lock);
+ 
+-	fscache_stat(&fscache_n_store_calls);
++	fscache_stat_unchecked(&fscache_n_store_calls);
+ 
+ 	/* find a page to store */
+ 	page = NULL;
+@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+ 	page = results[0];
+ 	_debug("gang %d [%lx]", n, page->index);
+ 	if (page->index > op->store_limit) {
+-		fscache_stat(&fscache_n_store_pages_over_limit);
++		fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
+ 		goto superseded;
+ 	}
+ 
+@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+ 
+ 	if (page) {
+ 		fscache_set_op_state(&op->op, "Store");
+-		fscache_stat(&fscache_n_store_pages);
++		fscache_stat_unchecked(&fscache_n_store_pages);
+ 		fscache_stat(&fscache_n_cop_write_page);
+ 		ret = object->cache->ops->write_page(op, page);
+ 		fscache_stat_d(&fscache_n_cop_write_page);
+@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+ 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ 	ASSERT(PageFsCache(page));
+ 
+-	fscache_stat(&fscache_n_stores);
++	fscache_stat_unchecked(&fscache_n_stores);
+ 
+ 	op = kzalloc(sizeof(*op), GFP_NOIO);
+ 	if (!op)
+@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+ 	spin_unlock(&cookie->stores_lock);
+ 	spin_unlock(&object->lock);
+ 
+-	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
++	op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+ 	op->store_limit = object->store_limit;
+ 
+ 	if (fscache_submit_op(object, &op->op) < 0)
+@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+ 
+ 	spin_unlock(&cookie->lock);
+ 	radix_tree_preload_end();
+-	fscache_stat(&fscache_n_store_ops);
+-	fscache_stat(&fscache_n_stores_ok);
++	fscache_stat_unchecked(&fscache_n_store_ops);
++	fscache_stat_unchecked(&fscache_n_stores_ok);
+ 
+ 	/* the slow work queue now carries its own ref on the object */
+ 	fscache_put_operation(&op->op);
+@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+ 	return 0;
+ 
+ already_queued:
+-	fscache_stat(&fscache_n_stores_again);
++	fscache_stat_unchecked(&fscache_n_stores_again);
+ already_pending:
+ 	spin_unlock(&cookie->stores_lock);
+ 	spin_unlock(&object->lock);
+ 	spin_unlock(&cookie->lock);
+ 	radix_tree_preload_end();
+ 	kfree(op);
+-	fscache_stat(&fscache_n_stores_ok);
++	fscache_stat_unchecked(&fscache_n_stores_ok);
+ 	_leave(" = 0");
+ 	return 0;
+ 
+@@ -886,14 +886,14 @@ nobufs:
+ 	spin_unlock(&cookie->lock);
+ 	radix_tree_preload_end();
+ 	kfree(op);
+-	fscache_stat(&fscache_n_stores_nobufs);
++	fscache_stat_unchecked(&fscache_n_stores_nobufs);
+ 	_leave(" = -ENOBUFS");
+ 	return -ENOBUFS;
+ 
+ nomem_free:
+ 	kfree(op);
+ nomem:
+-	fscache_stat(&fscache_n_stores_oom);
++	fscache_stat_unchecked(&fscache_n_stores_oom);
+ 	_leave(" = -ENOMEM");
+ 	return -ENOMEM;
+ }
+@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
+ 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ 	ASSERTCMP(page, !=, NULL);
+ 
+-	fscache_stat(&fscache_n_uncaches);
++	fscache_stat_unchecked(&fscache_n_uncaches);
+ 
+ 	/* cache withdrawal may beat us to it */
+ 	if (!PageFsCache(page))
+@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
+ 	unsigned long loop;
+ 
+ #ifdef CONFIG_FSCACHE_STATS
+-	atomic_add(pagevec->nr, &fscache_n_marks);
++	atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
+ #endif
+ 
+ 	for (loop = 0; loop < pagevec->nr; loop++) {
+diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
+index 46435f3a..8cddf18 100644
+--- a/fs/fscache/stats.c
++++ b/fs/fscache/stats.c
+@@ -18,95 +18,95 @@
+ /*
+  * operation counters
+  */
+-atomic_t fscache_n_op_pend;
+-atomic_t fscache_n_op_run;
+-atomic_t fscache_n_op_enqueue;
+-atomic_t fscache_n_op_requeue;
+-atomic_t fscache_n_op_deferred_release;
+-atomic_t fscache_n_op_release;
+-atomic_t fscache_n_op_gc;
+-atomic_t fscache_n_op_cancelled;
+-atomic_t fscache_n_op_rejected;
++atomic_unchecked_t fscache_n_op_pend;
++atomic_unchecked_t fscache_n_op_run;
++atomic_unchecked_t fscache_n_op_enqueue;
++atomic_unchecked_t fscache_n_op_requeue;
++atomic_unchecked_t fscache_n_op_deferred_release;
++atomic_unchecked_t fscache_n_op_release;
++atomic_unchecked_t fscache_n_op_gc;
++atomic_unchecked_t fscache_n_op_cancelled;
++atomic_unchecked_t fscache_n_op_rejected;
+ 
+-atomic_t fscache_n_attr_changed;
+-atomic_t fscache_n_attr_changed_ok;
+-atomic_t fscache_n_attr_changed_nobufs;
+-atomic_t fscache_n_attr_changed_nomem;
+-atomic_t fscache_n_attr_changed_calls;
++atomic_unchecked_t fscache_n_attr_changed;
++atomic_unchecked_t fscache_n_attr_changed_ok;
++atomic_unchecked_t fscache_n_attr_changed_nobufs;
++atomic_unchecked_t fscache_n_attr_changed_nomem;
++atomic_unchecked_t fscache_n_attr_changed_calls;
+ 
+-atomic_t fscache_n_allocs;
+-atomic_t fscache_n_allocs_ok;
+-atomic_t fscache_n_allocs_wait;
+-atomic_t fscache_n_allocs_nobufs;
+-atomic_t fscache_n_allocs_intr;
+-atomic_t fscache_n_allocs_object_dead;
+-atomic_t fscache_n_alloc_ops;
+-atomic_t fscache_n_alloc_op_waits;
++atomic_unchecked_t fscache_n_allocs;
++atomic_unchecked_t fscache_n_allocs_ok;
++atomic_unchecked_t fscache_n_allocs_wait;
++atomic_unchecked_t fscache_n_allocs_nobufs;
++atomic_unchecked_t fscache_n_allocs_intr;
++atomic_unchecked_t fscache_n_allocs_object_dead;
++atomic_unchecked_t fscache_n_alloc_ops;
++atomic_unchecked_t fscache_n_alloc_op_waits;
+ 
+-atomic_t fscache_n_retrievals;
+-atomic_t fscache_n_retrievals_ok;
+-atomic_t fscache_n_retrievals_wait;
+-atomic_t fscache_n_retrievals_nodata;
+-atomic_t fscache_n_retrievals_nobufs;
+-atomic_t fscache_n_retrievals_intr;
+-atomic_t fscache_n_retrievals_nomem;
+-atomic_t fscache_n_retrievals_object_dead;
+-atomic_t fscache_n_retrieval_ops;
+-atomic_t fscache_n_retrieval_op_waits;
++atomic_unchecked_t fscache_n_retrievals;
++atomic_unchecked_t fscache_n_retrievals_ok;
++atomic_unchecked_t fscache_n_retrievals_wait;
++atomic_unchecked_t fscache_n_retrievals_nodata;
++atomic_unchecked_t fscache_n_retrievals_nobufs;
++atomic_unchecked_t fscache_n_retrievals_intr;
++atomic_unchecked_t fscache_n_retrievals_nomem;
++atomic_unchecked_t fscache_n_retrievals_object_dead;
++atomic_unchecked_t fscache_n_retrieval_ops;
++atomic_unchecked_t fscache_n_retrieval_op_waits;
+ 
+-atomic_t fscache_n_stores;
+-atomic_t fscache_n_stores_ok;
+-atomic_t fscache_n_stores_again;
+-atomic_t fscache_n_stores_nobufs;
+-atomic_t fscache_n_stores_oom;
+-atomic_t fscache_n_store_ops;
+-atomic_t fscache_n_store_calls;
+-atomic_t fscache_n_store_pages;
+-atomic_t fscache_n_store_radix_deletes;
+-atomic_t fscache_n_store_pages_over_limit;
++atomic_unchecked_t fscache_n_stores;
++atomic_unchecked_t fscache_n_stores_ok;
++atomic_unchecked_t fscache_n_stores_again;
++atomic_unchecked_t fscache_n_stores_nobufs;
++atomic_unchecked_t fscache_n_stores_oom;
++atomic_unchecked_t fscache_n_store_ops;
++atomic_unchecked_t fscache_n_store_calls;
++atomic_unchecked_t fscache_n_store_pages;
++atomic_unchecked_t fscache_n_store_radix_deletes;
++atomic_unchecked_t fscache_n_store_pages_over_limit;
+ 
+-atomic_t fscache_n_store_vmscan_not_storing;
+-atomic_t fscache_n_store_vmscan_gone;
+-atomic_t fscache_n_store_vmscan_busy;
+-atomic_t fscache_n_store_vmscan_cancelled;
++atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++atomic_unchecked_t fscache_n_store_vmscan_gone;
++atomic_unchecked_t fscache_n_store_vmscan_busy;
++atomic_unchecked_t fscache_n_store_vmscan_cancelled;
+ 
+-atomic_t fscache_n_marks;
+-atomic_t fscache_n_uncaches;
++atomic_unchecked_t fscache_n_marks;
++atomic_unchecked_t fscache_n_uncaches;
+ 
+-atomic_t fscache_n_acquires;
+-atomic_t fscache_n_acquires_null;
+-atomic_t fscache_n_acquires_no_cache;
+-atomic_t fscache_n_acquires_ok;
+-atomic_t fscache_n_acquires_nobufs;
+-atomic_t fscache_n_acquires_oom;
++atomic_unchecked_t fscache_n_acquires;
++atomic_unchecked_t fscache_n_acquires_null;
++atomic_unchecked_t fscache_n_acquires_no_cache;
++atomic_unchecked_t fscache_n_acquires_ok;
++atomic_unchecked_t fscache_n_acquires_nobufs;
++atomic_unchecked_t fscache_n_acquires_oom;
+ 
+-atomic_t fscache_n_updates;
+-atomic_t fscache_n_updates_null;
+-atomic_t fscache_n_updates_run;
++atomic_unchecked_t fscache_n_updates;
++atomic_unchecked_t fscache_n_updates_null;
++atomic_unchecked_t fscache_n_updates_run;
+ 
+-atomic_t fscache_n_relinquishes;
+-atomic_t fscache_n_relinquishes_null;
+-atomic_t fscache_n_relinquishes_waitcrt;
+-atomic_t fscache_n_relinquishes_retire;
++atomic_unchecked_t fscache_n_relinquishes;
++atomic_unchecked_t fscache_n_relinquishes_null;
++atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++atomic_unchecked_t fscache_n_relinquishes_retire;
+ 
+-atomic_t fscache_n_cookie_index;
+-atomic_t fscache_n_cookie_data;
+-atomic_t fscache_n_cookie_special;
++atomic_unchecked_t fscache_n_cookie_index;
++atomic_unchecked_t fscache_n_cookie_data;
++atomic_unchecked_t fscache_n_cookie_special;
+ 
+-atomic_t fscache_n_object_alloc;
+-atomic_t fscache_n_object_no_alloc;
+-atomic_t fscache_n_object_lookups;
+-atomic_t fscache_n_object_lookups_negative;
+-atomic_t fscache_n_object_lookups_positive;
+-atomic_t fscache_n_object_lookups_timed_out;
+-atomic_t fscache_n_object_created;
+-atomic_t fscache_n_object_avail;
+-atomic_t fscache_n_object_dead;
++atomic_unchecked_t fscache_n_object_alloc;
++atomic_unchecked_t fscache_n_object_no_alloc;
++atomic_unchecked_t fscache_n_object_lookups;
++atomic_unchecked_t fscache_n_object_lookups_negative;
++atomic_unchecked_t fscache_n_object_lookups_positive;
++atomic_unchecked_t fscache_n_object_lookups_timed_out;
++atomic_unchecked_t fscache_n_object_created;
++atomic_unchecked_t fscache_n_object_avail;
++atomic_unchecked_t fscache_n_object_dead;
+ 
+-atomic_t fscache_n_checkaux_none;
+-atomic_t fscache_n_checkaux_okay;
+-atomic_t fscache_n_checkaux_update;
+-atomic_t fscache_n_checkaux_obsolete;
++atomic_unchecked_t fscache_n_checkaux_none;
++atomic_unchecked_t fscache_n_checkaux_okay;
++atomic_unchecked_t fscache_n_checkaux_update;
++atomic_unchecked_t fscache_n_checkaux_obsolete;
+ 
+ atomic_t fscache_n_cop_alloc_object;
+ atomic_t fscache_n_cop_lookup_object;
+@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
+ 	seq_puts(m, "FS-Cache statistics\n");
+ 
+ 	seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
+-		   atomic_read(&fscache_n_cookie_index),
+-		   atomic_read(&fscache_n_cookie_data),
+-		   atomic_read(&fscache_n_cookie_special));
++		   atomic_read_unchecked(&fscache_n_cookie_index),
++		   atomic_read_unchecked(&fscache_n_cookie_data),
++		   atomic_read_unchecked(&fscache_n_cookie_special));
+ 
+ 	seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
+-		   atomic_read(&fscache_n_object_alloc),
+-		   atomic_read(&fscache_n_object_no_alloc),
+-		   atomic_read(&fscache_n_object_avail),
+-		   atomic_read(&fscache_n_object_dead));
++		   atomic_read_unchecked(&fscache_n_object_alloc),
++		   atomic_read_unchecked(&fscache_n_object_no_alloc),
++		   atomic_read_unchecked(&fscache_n_object_avail),
++		   atomic_read_unchecked(&fscache_n_object_dead));
+ 	seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
+-		   atomic_read(&fscache_n_checkaux_none),
+-		   atomic_read(&fscache_n_checkaux_okay),
+-		   atomic_read(&fscache_n_checkaux_update),
+-		   atomic_read(&fscache_n_checkaux_obsolete));
++		   atomic_read_unchecked(&fscache_n_checkaux_none),
++		   atomic_read_unchecked(&fscache_n_checkaux_okay),
++		   atomic_read_unchecked(&fscache_n_checkaux_update),
++		   atomic_read_unchecked(&fscache_n_checkaux_obsolete));
+ 
+ 	seq_printf(m, "Pages : mrk=%u unc=%u\n",
+-		   atomic_read(&fscache_n_marks),
+-		   atomic_read(&fscache_n_uncaches));
++		   atomic_read_unchecked(&fscache_n_marks),
++		   atomic_read_unchecked(&fscache_n_uncaches));
+ 
+ 	seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
+ 		   " oom=%u\n",
+-		   atomic_read(&fscache_n_acquires),
+-		   atomic_read(&fscache_n_acquires_null),
+-		   atomic_read(&fscache_n_acquires_no_cache),
+-		   atomic_read(&fscache_n_acquires_ok),
+-		   atomic_read(&fscache_n_acquires_nobufs),
+-		   atomic_read(&fscache_n_acquires_oom));
++		   atomic_read_unchecked(&fscache_n_acquires),
++		   atomic_read_unchecked(&fscache_n_acquires_null),
++		   atomic_read_unchecked(&fscache_n_acquires_no_cache),
++		   atomic_read_unchecked(&fscache_n_acquires_ok),
++		   atomic_read_unchecked(&fscache_n_acquires_nobufs),
++		   atomic_read_unchecked(&fscache_n_acquires_oom));
+ 
+ 	seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
+-		   atomic_read(&fscache_n_object_lookups),
+-		   atomic_read(&fscache_n_object_lookups_negative),
+-		   atomic_read(&fscache_n_object_lookups_positive),
+-		   atomic_read(&fscache_n_object_lookups_timed_out),
+-		   atomic_read(&fscache_n_object_created));
++		   atomic_read_unchecked(&fscache_n_object_lookups),
++		   atomic_read_unchecked(&fscache_n_object_lookups_negative),
++		   atomic_read_unchecked(&fscache_n_object_lookups_positive),
++		   atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
++		   atomic_read_unchecked(&fscache_n_object_created));
+ 
+ 	seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+-		   atomic_read(&fscache_n_updates),
+-		   atomic_read(&fscache_n_updates_null),
+-		   atomic_read(&fscache_n_updates_run));
++		   atomic_read_unchecked(&fscache_n_updates),
++		   atomic_read_unchecked(&fscache_n_updates_null),
++		   atomic_read_unchecked(&fscache_n_updates_run));
+ 
+ 	seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
+-		   atomic_read(&fscache_n_relinquishes),
+-		   atomic_read(&fscache_n_relinquishes_null),
+-		   atomic_read(&fscache_n_relinquishes_waitcrt),
+-		   atomic_read(&fscache_n_relinquishes_retire));
++		   atomic_read_unchecked(&fscache_n_relinquishes),
++		   atomic_read_unchecked(&fscache_n_relinquishes_null),
++		   atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
++		   atomic_read_unchecked(&fscache_n_relinquishes_retire));
+ 
+ 	seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
+-		   atomic_read(&fscache_n_attr_changed),
+-		   atomic_read(&fscache_n_attr_changed_ok),
+-		   atomic_read(&fscache_n_attr_changed_nobufs),
+-		   atomic_read(&fscache_n_attr_changed_nomem),
+-		   atomic_read(&fscache_n_attr_changed_calls));
++		   atomic_read_unchecked(&fscache_n_attr_changed),
++		   atomic_read_unchecked(&fscache_n_attr_changed_ok),
++		   atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
++		   atomic_read_unchecked(&fscache_n_attr_changed_nomem),
++		   atomic_read_unchecked(&fscache_n_attr_changed_calls));
+ 
+ 	seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
+-		   atomic_read(&fscache_n_allocs),
+-		   atomic_read(&fscache_n_allocs_ok),
+-		   atomic_read(&fscache_n_allocs_wait),
+-		   atomic_read(&fscache_n_allocs_nobufs),
+-		   atomic_read(&fscache_n_allocs_intr));
++		   atomic_read_unchecked(&fscache_n_allocs),
++		   atomic_read_unchecked(&fscache_n_allocs_ok),
++		   atomic_read_unchecked(&fscache_n_allocs_wait),
++		   atomic_read_unchecked(&fscache_n_allocs_nobufs),
++		   atomic_read_unchecked(&fscache_n_allocs_intr));
+ 	seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
+-		   atomic_read(&fscache_n_alloc_ops),
+-		   atomic_read(&fscache_n_alloc_op_waits),
+-		   atomic_read(&fscache_n_allocs_object_dead));
++		   atomic_read_unchecked(&fscache_n_alloc_ops),
++		   atomic_read_unchecked(&fscache_n_alloc_op_waits),
++		   atomic_read_unchecked(&fscache_n_allocs_object_dead));
+ 
+ 	seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
+ 		   " int=%u oom=%u\n",
+-		   atomic_read(&fscache_n_retrievals),
+-		   atomic_read(&fscache_n_retrievals_ok),
+-		   atomic_read(&fscache_n_retrievals_wait),
+-		   atomic_read(&fscache_n_retrievals_nodata),
+-		   atomic_read(&fscache_n_retrievals_nobufs),
+-		   atomic_read(&fscache_n_retrievals_intr),
+-		   atomic_read(&fscache_n_retrievals_nomem));
++		   atomic_read_unchecked(&fscache_n_retrievals),
++		   atomic_read_unchecked(&fscache_n_retrievals_ok),
++		   atomic_read_unchecked(&fscache_n_retrievals_wait),
++		   atomic_read_unchecked(&fscache_n_retrievals_nodata),
++		   atomic_read_unchecked(&fscache_n_retrievals_nobufs),
++		   atomic_read_unchecked(&fscache_n_retrievals_intr),
++		   atomic_read_unchecked(&fscache_n_retrievals_nomem));
+ 	seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
+-		   atomic_read(&fscache_n_retrieval_ops),
+-		   atomic_read(&fscache_n_retrieval_op_waits),
+-		   atomic_read(&fscache_n_retrievals_object_dead));
++		   atomic_read_unchecked(&fscache_n_retrieval_ops),
++		   atomic_read_unchecked(&fscache_n_retrieval_op_waits),
++		   atomic_read_unchecked(&fscache_n_retrievals_object_dead));
+ 
+ 	seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
+-		   atomic_read(&fscache_n_stores),
+-		   atomic_read(&fscache_n_stores_ok),
+-		   atomic_read(&fscache_n_stores_again),
+-		   atomic_read(&fscache_n_stores_nobufs),
+-		   atomic_read(&fscache_n_stores_oom));
++		   atomic_read_unchecked(&fscache_n_stores),
++		   atomic_read_unchecked(&fscache_n_stores_ok),
++		   atomic_read_unchecked(&fscache_n_stores_again),
++		   atomic_read_unchecked(&fscache_n_stores_nobufs),
++		   atomic_read_unchecked(&fscache_n_stores_oom));
+ 	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+-		   atomic_read(&fscache_n_store_ops),
+-		   atomic_read(&fscache_n_store_calls),
+-		   atomic_read(&fscache_n_store_pages),
+-		   atomic_read(&fscache_n_store_radix_deletes),
+-		   atomic_read(&fscache_n_store_pages_over_limit));
++		   atomic_read_unchecked(&fscache_n_store_ops),
++		   atomic_read_unchecked(&fscache_n_store_calls),
++		   atomic_read_unchecked(&fscache_n_store_pages),
++		   atomic_read_unchecked(&fscache_n_store_radix_deletes),
++		   atomic_read_unchecked(&fscache_n_store_pages_over_limit));
+ 
+ 	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+-		   atomic_read(&fscache_n_store_vmscan_not_storing),
+-		   atomic_read(&fscache_n_store_vmscan_gone),
+-		   atomic_read(&fscache_n_store_vmscan_busy),
+-		   atomic_read(&fscache_n_store_vmscan_cancelled));
++		   atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
++		   atomic_read_unchecked(&fscache_n_store_vmscan_gone),
++		   atomic_read_unchecked(&fscache_n_store_vmscan_busy),
++		   atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
+ 
+ 	seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
+-		   atomic_read(&fscache_n_op_pend),
+-		   atomic_read(&fscache_n_op_run),
+-		   atomic_read(&fscache_n_op_enqueue),
+-		   atomic_read(&fscache_n_op_cancelled),
+-		   atomic_read(&fscache_n_op_rejected));
++		   atomic_read_unchecked(&fscache_n_op_pend),
++		   atomic_read_unchecked(&fscache_n_op_run),
++		   atomic_read_unchecked(&fscache_n_op_enqueue),
++		   atomic_read_unchecked(&fscache_n_op_cancelled),
++		   atomic_read_unchecked(&fscache_n_op_rejected));
+ 	seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
+-		   atomic_read(&fscache_n_op_deferred_release),
+-		   atomic_read(&fscache_n_op_release),
+-		   atomic_read(&fscache_n_op_gc));
++		   atomic_read_unchecked(&fscache_n_op_deferred_release),
++		   atomic_read_unchecked(&fscache_n_op_release),
++		   atomic_read_unchecked(&fscache_n_op_gc));
+ 
+ 	seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+ 		   atomic_read(&fscache_n_cop_alloc_object),
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index de792dc..448b532 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -576,10 +576,12 @@ static int __init cuse_init(void)
+ 		INIT_LIST_HEAD(&cuse_conntbl[i]);
+ 
+ 	/* inherit and extend fuse_dev_operations */
+-	cuse_channel_fops = fuse_dev_operations;
+-	cuse_channel_fops.owner = THIS_MODULE;
+-	cuse_channel_fops.open = cuse_channel_open;
+-	cuse_channel_fops.release = cuse_channel_release;
++	pax_open_kernel();
++	memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
++	*(void **)&cuse_channel_fops.owner = THIS_MODULE;
++	*(void **)&cuse_channel_fops.open = cuse_channel_open;
++	*(void **)&cuse_channel_fops.release = cuse_channel_release;
++	pax_close_kernel();
+ 
+ 	cuse_class = class_create(THIS_MODULE, "cuse");
+ 	if (IS_ERR(cuse_class))
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 1facb39..7f48557 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ {
+ 	struct fuse_notify_inval_entry_out outarg;
+ 	int err = -EINVAL;
+-	char buf[FUSE_NAME_MAX+1];
++	char *buf = NULL;
+ 	struct qstr name;
+ 
+ 	if (size < sizeof(outarg))
+@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ 	if (outarg.namelen > FUSE_NAME_MAX)
+ 		goto err;
+ 
++	err = -ENOMEM;
++	buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
++	if (!buf)
++		goto err;
++
+ 	err = -EINVAL;
+ 	if (size != sizeof(outarg) + outarg.namelen + 1)
+ 		goto err;
+@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ 
+ 	down_read(&fc->killsb);
+ 	err = -ENOENT;
+-	if (!fc->sb)
+-		goto err_unlock;
+-
+-	err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+-
+-err_unlock:
++	if (fc->sb)
++		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+ 	up_read(&fc->killsb);
++	kfree(buf);
+ 	return err;
+ 
+ err:
+ 	fuse_copy_finish(cs);
++	kfree(buf);
+ 	return err;
+ }
+ 
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 4787ae6..73efff7 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
+ 	return link;
+ }
+ 
+-static void free_link(char *link)
++static void free_link(const char *link)
+ {
+ 	if (!IS_ERR(link))
+ 		free_page((unsigned long) link);
+diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
+index 247436c..e650ccb 100644
+--- a/fs/gfs2/ops_inode.c
++++ b/fs/gfs2/ops_inode.c
+@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
+ 	unsigned int x;
+ 	int error;
+ 
++	pax_track_stack();
++
+ 	if (ndentry->d_inode) {
+ 		nip = GFS2_I(ndentry->d_inode);
+ 		if (ip == nip)
+diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
+index 4463297..4fed53b 100644
+--- a/fs/gfs2/sys.c
++++ b/fs/gfs2/sys.c
+@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
+ 	return a->store ? a->store(sdp, buf, len) : len;
+ }
+ 
+-static struct sysfs_ops gfs2_attr_ops = {
++static const struct sysfs_ops gfs2_attr_ops = {
+ 	.show = gfs2_attr_show,
+ 	.store = gfs2_attr_store,
+ };
+@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
+ 	return 0;
+ }
+ 
+-static struct kset_uevent_ops gfs2_uevent_ops = {
++static const struct kset_uevent_ops gfs2_uevent_ops = {
+ 	.uevent = gfs2_uevent,
+ };
+ 
+diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
+index f6874ac..46904bd 100644
+--- a/fs/hfsplus/catalog.c
++++ b/fs/hfsplus/catalog.c
+@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
+ 	int err;
+ 	u16 type;
+ 
++	pax_track_stack();
++
+ 	hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
+ 	err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
+ 	if (err)
+@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
+ 	int entry_size;
+ 	int err;
+ 
++	pax_track_stack();
++
+ 	dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
+ 	sb = dir->i_sb;
+ 	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
+ 	int entry_size, type;
+ 	int err = 0;
+ 
++	pax_track_stack();
++
+ 	dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
+ 		dst_dir->i_ino, dst_name->name);
+ 	sb = src_dir->i_sb;
+@@ -329,6 +335,10 @@ int hfsplus_rename_cat(u32 cnid,
+ 	err = hfs_brec_find(&src_fd);
+ 	if (err)
+ 		goto out;
++	if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
++		err = -EIO;
++		goto out;
++	}
+ 
+ 	hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
+ 			src_fd.entrylength);
+diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
+index 5f40236..6ec38b2 100644
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ 	struct hfsplus_readdir_data *rd;
+ 	u16 type;
+ 
++	pax_track_stack();
++
+ 	if (filp->f_pos >= inode->i_size)
+ 		return 0;
+ 
+@@ -138,6 +140,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ 		filp->f_pos++;
+ 		/* fall through */
+ 	case 1:
++		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
++			err = -EIO;
++			goto out;
++		}
++
+ 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+ 		if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
+ 			printk(KERN_ERR "hfs: bad catalog folder thread\n");
+@@ -168,6 +175,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ 			err = -EIO;
+ 			goto out;
+ 		}
++
++		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
++			err = -EIO;
++			goto out;
++		}
++
+ 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+ 		type = be16_to_cpu(entry.type);
+ 		len = HFSPLUS_MAX_STRLEN;
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index 1bcf597..905a251 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ 	int res = 0;
+ 	u16 type;
+ 
++	pax_track_stack();
++
+ 	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
+ 
+ 	HFSPLUS_I(inode).dev = 0;
+@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ 	struct hfs_find_data fd;
+ 	hfsplus_cat_entry entry;
+ 
++	pax_track_stack();
++
+ 	if (HFSPLUS_IS_RSRC(inode))
+ 		main_inode = HFSPLUS_I(inode).rsrc_inode;
+ 
+diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
+index f457d2c..7ef4ad5 100644
+--- a/fs/hfsplus/ioctl.c
++++ b/fs/hfsplus/ioctl.c
+@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
+ 	struct hfsplus_cat_file *file;
+ 	int res;
+ 
++	pax_track_stack();
++
+ 	if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
+ 		return -EOPNOTSUPP;
+ 
+@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ 	struct hfsplus_cat_file *file;
+ 	ssize_t res = 0;
+ 
++	pax_track_stack();
++
+ 	if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
+ 		return -EOPNOTSUPP;
+ 
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index 43022f3..7298079 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
+ 	struct nls_table *nls = NULL;
+ 	int err = -EINVAL;
+ 
++	pax_track_stack();
++
+ 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ 	if (!sbi)
+ 		return -ENOMEM;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 87a1258..5694d91 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
+ 	.kill_sb = kill_litter_super,
+ };
+ 
+-static struct vfsmount *hugetlbfs_vfsmount;
++struct vfsmount *hugetlbfs_vfsmount;
+ 
+ static int can_do_hugetlb_shm(void)
+ {
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 6c75110..19d2c3c 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
+ 			    u64 phys, u64 len, u32 flags)
+ {
+ 	struct fiemap_extent extent;
+-	struct fiemap_extent *dest = fieinfo->fi_extents_start;
++	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
+ 
+ 	/* only count the extents */
+ 	if (fieinfo->fi_extents_max == 0) {
+@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
+ 
+ 	fieinfo.fi_flags = fiemap.fm_flags;
+ 	fieinfo.fi_extents_max = fiemap.fm_extent_count;
+-	fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
++	fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
+ 
+ 	if (fiemap.fm_extent_count != 0 &&
+ 	    !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
+@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
+ 	error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
+ 	fiemap.fm_flags = fieinfo.fi_flags;
+ 	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
+-	if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
++	if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
+ 		error = -EFAULT;
+ 
+ 	return error;
+diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
+index b0435dd..81ee0be 100644
+--- a/fs/jbd/checkpoint.c
++++ b/fs/jbd/checkpoint.c
+@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
+ 	tid_t this_tid;
+ 	int result;
+ 
++	pax_track_stack();
++
+ 	jbd_debug(1, "Start checkpoint\n");
+ 
+ 	/*
+diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
+index 546d153..736896c 100644
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
+ 	int outpos = 0;
+ 	int pos=0;
+ 
++	pax_track_stack();
++
+ 	memset(positions,0,sizeof(positions));
+ 
+ 	while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
+@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
+ 	int outpos = 0;
+ 	int pos=0;
+ 
++	pax_track_stack();
++
+ 	memset(positions,0,sizeof(positions));
+ 
+ 	while (outpos<destlen) {
+diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
+index 170d289..3254b98 100644
+--- a/fs/jffs2/compr_rubin.c
++++ b/fs/jffs2/compr_rubin.c
+@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
+ 	int ret;
+ 	uint32_t mysrclen, mydstlen;
+ 
++	pax_track_stack();
++
+ 	mysrclen = *sourcelen;
+ 	mydstlen = *dstlen - 8;
+ 
+diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
+index b47679b..00d65d3 100644
+--- a/fs/jffs2/erase.c
++++ b/fs/jffs2/erase.c
+@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
+ 		struct jffs2_unknown_node marker = {
+ 			.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ 			.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+-			.totlen = cpu_to_je32(c->cleanmarker_size)
++			.totlen = cpu_to_je32(c->cleanmarker_size),
++			.hdr_crc = cpu_to_je32(0)
+ 		};
+ 
+ 		jffs2_prealloc_raw_node_refs(c, jeb, 1);
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index 5ef7bac..4fd1e3c 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
+ {
+ 	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ 	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+-	.totlen = constant_cpu_to_je32(8)
++	.totlen = constant_cpu_to_je32(8),
++	.hdr_crc = constant_cpu_to_je32(0)
+ };
+ 
+ /*
+diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
+index 082e844..52012a1 100644
+--- a/fs/jffs2/xattr.c
++++ b/fs/jffs2/xattr.c
+@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
+ 
+ 	BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
+ 
++	pax_track_stack();
++
+ 	/* Phase.1 : Merge same xref */
+ 	for (i=0; i < XREF_TMPHASH_SIZE; i++)
+ 		xref_tmphash[i] = NULL;
+diff --git a/fs/jfs/super.c b/fs/jfs/super.c
+index 2234c73..f6e6e6b 100644
+--- a/fs/jfs/super.c
++++ b/fs/jfs/super.c
+@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
+ 
+ 	jfs_inode_cachep =
+ 	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
+-			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
++			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
+ 			    init_once);
+ 	if (jfs_inode_cachep == NULL)
+ 		return -ENOMEM;
+diff --git a/fs/libfs.c b/fs/libfs.c
+index ba36e93..3153fce 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ 
+ 			for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+ 				struct dentry *next;
++				char d_name[sizeof(next->d_iname)];
++				const unsigned char *name;
++
+ 				next = list_entry(p, struct dentry, d_u.d_child);
+ 				if (d_unhashed(next) || !next->d_inode)
+ 					continue;
+ 
+ 				spin_unlock(&dcache_lock);
+-				if (filldir(dirent, next->d_name.name,
++				name = next->d_name.name;
++				if (name == next->d_iname) {
++					memcpy(d_name, name, next->d_name.len);
++					name = d_name;
++				}
++				if (filldir(dirent, name,
+ 					    next->d_name.len, filp->f_pos,
+ 					    next->d_inode->i_ino,
+ 					    dt_type(next->d_inode)) < 0)
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index c325a83..d15b07b 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
+ /*
+  * Cookie counter for NLM requests
+  */
+-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
++static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
+ 
+ void nlmclnt_next_cookie(struct nlm_cookie *c)
+ {
+-	u32 cookie = atomic_inc_return(&nlm_cookie);
++	u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
+ 
+ 	memcpy(c->data, &cookie, 4);
+ 	c->len=4;
+@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
+ 	struct nlm_rqst reqst, *req;
+ 	int status;
+ 
++	pax_track_stack();
++
+ 	req = &reqst;
+ 	memset(req, 0, sizeof(*req));
+ 	locks_init_lock(&req->a_args.lock.fl);
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 1a54ae1..6a16c27 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -43,7 +43,7 @@
+ 
+ static struct svc_program nlmsvc_program;
+ 
+-struct nlmsvc_binding * nlmsvc_ops;
++const struct nlmsvc_binding * nlmsvc_ops;
+ EXPORT_SYMBOL_GPL(nlmsvc_ops);
+ 
+ static DEFINE_MUTEX(nlmsvc_mutex);
+diff --git a/fs/locks.c b/fs/locks.c
+index a8794f2..4041e55 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
+ 
+ static struct kmem_cache *filelock_cache __read_mostly;
+ 
++static void locks_init_lock_always(struct file_lock *fl)
++{
++	fl->fl_next = NULL;
++	fl->fl_fasync = NULL;
++	fl->fl_owner = NULL;
++	fl->fl_pid = 0;
++	fl->fl_nspid = NULL;
++	fl->fl_file = NULL;
++	fl->fl_flags = 0;
++	fl->fl_type = 0;
++	fl->fl_start = fl->fl_end = 0;
++}
++
+ /* Allocate an empty lock structure. */
+ static struct file_lock *locks_alloc_lock(void)
+ {
+-	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
++	struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
++
++	if (fl)
++		locks_init_lock_always(fl);
++
++	return fl;
+ }
+ 
+ void locks_release_private(struct file_lock *fl)
+@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
+ 	INIT_LIST_HEAD(&fl->fl_link);
+ 	INIT_LIST_HEAD(&fl->fl_block);
+ 	init_waitqueue_head(&fl->fl_wait);
+-	fl->fl_next = NULL;
+-	fl->fl_fasync = NULL;
+-	fl->fl_owner = NULL;
+-	fl->fl_pid = 0;
+-	fl->fl_nspid = NULL;
+-	fl->fl_file = NULL;
+-	fl->fl_flags = 0;
+-	fl->fl_type = 0;
+-	fl->fl_start = fl->fl_end = 0;
+ 	fl->fl_ops = NULL;
+ 	fl->fl_lmops = NULL;
++	locks_init_lock_always(fl);
+ }
+ 
+ EXPORT_SYMBOL(locks_init_lock);
+@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
+ 		return;
+ 
+ 	if (filp->f_op && filp->f_op->flock) {
+-		struct file_lock fl = {
++		struct file_lock flock = {
+ 			.fl_pid = current->tgid,
+ 			.fl_file = filp,
+ 			.fl_flags = FL_FLOCK,
+ 			.fl_type = F_UNLCK,
+ 			.fl_end = OFFSET_MAX,
+ 		};
+-		filp->f_op->flock(filp, F_SETLKW, &fl);
+-		if (fl.fl_ops && fl.fl_ops->fl_release_private)
+-			fl.fl_ops->fl_release_private(&fl);
++		filp->f_op->flock(filp, F_SETLKW, &flock);
++		if (flock.fl_ops && flock.fl_ops->fl_release_private)
++			flock.fl_ops->fl_release_private(&flock);
+ 	}
+ 
+ 	lock_kernel();
+diff --git a/fs/mbcache.c b/fs/mbcache.c
+index ec88ff3..b843a82 100644
+--- a/fs/mbcache.c
++++ b/fs/mbcache.c
+@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
+ 	if (!cache)
+ 		goto fail;
+ 	cache->c_name = name;
+-	cache->c_op.free = NULL;
++	*(void **)&cache->c_op.free = NULL;
+ 	if (cache_op)
+-		cache->c_op.free = cache_op->free;
++		*(void **)&cache->c_op.free = cache_op->free;
+ 	atomic_set(&cache->c_entry_count, 0);
+ 	cache->c_bucket_bits = bucket_bits;
+ #ifdef MB_CACHE_INDEXES_COUNT
+diff --git a/fs/namei.c b/fs/namei.c
+index b0afbd4..8d065a1 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
+ 		return ret;
+ 
+ 	/*
++	 * Searching includes executable on directories, else just read.
++	 */
++	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
++	if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
++		if (capable(CAP_DAC_READ_SEARCH))
++			return 0;
++
++	/*
+ 	 * Read/write DACs are always overridable.
+ 	 * Executable DACs are overridable if at least one exec bit is set.
+ 	 */
+@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
+ 	if (capable(CAP_DAC_OVERRIDE))
+ 		return 0;
+ 
+-	/*
+-	 * Searching includes executable on directories, else just read.
+-	 */
+-	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+-	if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
+-		if (capable(CAP_DAC_READ_SEARCH))
+-			return 0;
+-
+ 	return -EACCES;
+ }
+ 
+@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
+ 	if (!ret)
+ 		goto ok;
+ 
+-	if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
++	if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
++	    capable(CAP_DAC_OVERRIDE))
+ 		goto ok;
+ 
+ 	return ret;
+@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
+ 	cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
+ 	error = PTR_ERR(cookie);
+ 	if (!IS_ERR(cookie)) {
+-		char *s = nd_get_link(nd);
++		const char *s = nd_get_link(nd);
+ 		error = 0;
+ 		if (s)
+ 			error = __vfs_follow_link(nd, s);
+@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
+ 	err = security_inode_follow_link(path->dentry, nd);
+ 	if (err)
+ 		goto loop;
++
++	if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
++				  path->dentry->d_inode, path->dentry, nd->path.mnt)) {
++		err = -EACCES;
++		goto loop;
++	}
++
+ 	current->link_count++;
+ 	current->total_link_count++;
+ 	nd->depth++;
+@@ -1016,11 +1024,19 @@ return_reval:
+ 		break;
+ 	}
+ return_base:
++	if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
++	    !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++		path_put(&nd->path);
++		return -ENOENT;
++	}
+ 	return 0;
+ out_dput:
+ 	path_put_conditional(&next, nd);
+ 	break;
+ }
++	if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
++		err = -ENOENT;
++
+ 	path_put(&nd->path);
+ return_err:
+ 	return err;
+@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
+ 	int retval = path_init(dfd, name, flags, nd);
+ 	if (!retval)
+ 		retval = path_walk(name, nd);
+-	if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
+-				nd->path.dentry->d_inode))
+-		audit_inode(name, nd->path.dentry);
++
++	if (likely(!retval)) {
++		if (nd->path.dentry && nd->path.dentry->d_inode) {
++			if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
++				retval = -ENOENT;
++			if (!audit_dummy_context())
++				audit_inode(name, nd->path.dentry);
++		}
++	}
+ 	if (nd->root.mnt) {
+ 		path_put(&nd->root);
+ 		nd->root.mnt = NULL;
+ 	}
++
+ 	return retval;
+ }
+ 
+@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
+ 	if (error)
+ 		goto err_out;
+ 
++
++	if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
++		error = -EPERM;
++		goto err_out;
++	}
++	if (gr_handle_rawio(inode)) {
++		error = -EPERM;
++		goto err_out;
++	}
++	if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
++		error = -EACCES;
++		goto err_out;
++	}
++
+ 	if (flag & O_TRUNC) {
+ 		error = get_write_access(inode);
+ 		if (error)
+@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
+ {
+ 	int error;
+ 	struct dentry *dir = nd->path.dentry;
++	int acc_mode = ACC_MODE(flag);
++
++	if (flag & O_TRUNC)
++		acc_mode |= MAY_WRITE;
++	if (flag & O_APPEND)
++		acc_mode |= MAY_APPEND;
++
++	if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
++		error = -EACCES;
++		goto out_unlock;
++	}
+ 
+ 	if (!IS_POSIXACL(dir->d_inode))
+ 		mode &= ~current_umask();
+@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
+ 	if (error)
+ 		goto out_unlock;
+ 	error = vfs_create(dir->d_inode, path->dentry, mode, nd);
++	if (!error)
++		gr_handle_create(path->dentry, nd->path.mnt);
+ out_unlock:
+ 	mutex_unlock(&dir->d_inode->i_mutex);
+ 	dput(nd->path.dentry);
+@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
+ 				   &nd, flag);
+ 		if (error)
+ 			return ERR_PTR(error);
++
++		if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
++			error = -EPERM;
++			goto exit;
++		}
++
++		if (gr_handle_rawio(nd.path.dentry->d_inode)) {
++			error = -EPERM;
++			goto exit;
++		}
++
++		if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
++			error = -EACCES;
++			goto exit;
++		}
++
+ 		goto ok;
+ 	}
+ 
+@@ -1795,6 +1861,19 @@ do_last:
+ 		/*
+ 		 * It already exists.
+ 		 */
++
++		if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
++			error = -ENOENT;
++			goto exit_mutex_unlock;
++		}
++
++		/* only check if O_CREAT is specified, all other checks need
++		   to go into may_open */
++		if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
++			error = -EACCES;
++			goto exit_mutex_unlock;
++		}
++
+ 		mutex_unlock(&dir->d_inode->i_mutex);
+ 		audit_inode(pathname, path.dentry);
+ 
+@@ -1887,6 +1966,13 @@ do_link:
+ 	error = security_inode_follow_link(path.dentry, &nd);
+ 	if (error)
+ 		goto exit_dput;
++
++	if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
++				  path.dentry, nd.path.mnt)) {
++		error = -EACCES;
++		goto exit_dput;
++	}
++
+ 	error = __do_follow_link(&path, &nd);
+ 	if (error) {
+ 		/* Does someone understand code flow here? Or it is only
+@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
+ 	}
+ 	return dentry;
+ eexist:
++	if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
++		dput(dentry);
++		return ERR_PTR(-ENOENT);
++	}
+ 	dput(dentry);
+ 	dentry = ERR_PTR(-EEXIST);
+ fail:
+@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+ 	error = may_mknod(mode);
+ 	if (error)
+ 		goto out_dput;
++
++	if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
++		error = -EPERM;
++		goto out_dput;
++	}
++
++	if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
++		error = -EACCES;
++		goto out_dput;
++	}
++
+ 	error = mnt_want_write(nd.path.mnt);
+ 	if (error)
+ 		goto out_dput;
+@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+ 	}
+ out_drop_write:
+ 	mnt_drop_write(nd.path.mnt);
++
++	if (!error)
++		gr_handle_create(dentry, nd.path.mnt);
+ out_dput:
+ 	dput(dentry);
+ out_unlock:
+@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+ 	if (IS_ERR(dentry))
+ 		goto out_unlock;
+ 
++	if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
++		error = -EACCES;
++		goto out_dput;
++	}
++
+ 	if (!IS_POSIXACL(nd.path.dentry->d_inode))
+ 		mode &= ~current_umask();
+ 	error = mnt_want_write(nd.path.mnt);
+@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+ 	error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
+ out_drop_write:
+ 	mnt_drop_write(nd.path.mnt);
++
++	if (!error)
++		gr_handle_create(dentry, nd.path.mnt);
++
+ out_dput:
+ 	dput(dentry);
+ out_unlock:
+@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+ 	char * name;
+ 	struct dentry *dentry;
+ 	struct nameidata nd;
++	ino_t saved_ino = 0;
++	dev_t saved_dev = 0;
+ 
+ 	error = user_path_parent(dfd, pathname, &nd, &name);
+ 	if (error)
+@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
+ 	error = PTR_ERR(dentry);
+ 	if (IS_ERR(dentry))
+ 		goto exit2;
++
++	if (dentry->d_inode != NULL) {
++		saved_ino = dentry->d_inode->i_ino;
++		saved_dev = gr_get_dev_from_dentry(dentry);
++
++		if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
++			error = -EACCES;
++			goto exit3;
++		}
++	}
++
+ 	error = mnt_want_write(nd.path.mnt);
+ 	if (error)
+ 		goto exit3;
+@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+ 	if (error)
+ 		goto exit4;
+ 	error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
++	if (!error && (saved_dev || saved_ino))
++		gr_handle_delete(saved_ino, saved_dev);
+ exit4:
+ 	mnt_drop_write(nd.path.mnt);
+ exit3:
+@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+ 	struct dentry *dentry;
+ 	struct nameidata nd;
+ 	struct inode *inode = NULL;
++	ino_t saved_ino = 0;
++	dev_t saved_dev = 0;
+ 
+ 	error = user_path_parent(dfd, pathname, &nd, &name);
+ 	if (error)
+@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+ 		if (nd.last.name[nd.last.len])
+ 			goto slashes;
+ 		inode = dentry->d_inode;
+-		if (inode)
++		if (inode) {
++			if (inode->i_nlink <= 1) {
++				saved_ino = inode->i_ino;
++				saved_dev = gr_get_dev_from_dentry(dentry);
++			}
++
+ 			atomic_inc(&inode->i_count);
++
++			if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
++				error = -EACCES;
++				goto exit2;
++			}
++		}
+ 		error = mnt_want_write(nd.path.mnt);
+ 		if (error)
+ 			goto exit2;
+@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+ 		if (error)
+ 			goto exit3;
+ 		error = vfs_unlink(nd.path.dentry->d_inode, dentry);
++		if (!error && (saved_ino || saved_dev))
++			gr_handle_delete(saved_ino, saved_dev);
+ exit3:
+ 		mnt_drop_write(nd.path.mnt);
+ 	exit2:
+@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
+ 	if (IS_ERR(dentry))
+ 		goto out_unlock;
+ 
++	if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
++		error = -EACCES;
++		goto out_dput;
++	}
++
+ 	error = mnt_want_write(nd.path.mnt);
+ 	if (error)
+ 		goto out_dput;
+@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
+ 	if (error)
+ 		goto out_drop_write;
+ 	error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
++	if (!error)
++		gr_handle_create(dentry, nd.path.mnt);
+ out_drop_write:
+ 	mnt_drop_write(nd.path.mnt);
+ out_dput:
+@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+ 	error = PTR_ERR(new_dentry);
+ 	if (IS_ERR(new_dentry))
+ 		goto out_unlock;
++
++	if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
++			       old_path.dentry->d_inode,
++			       old_path.dentry->d_inode->i_mode, to)) {
++		error = -EACCES;
++		goto out_dput;
++	}
++
++	if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
++				old_path.dentry, old_path.mnt, to)) {
++		error = -EACCES;
++		goto out_dput;
++	}
++
+ 	error = mnt_want_write(nd.path.mnt);
+ 	if (error)
+ 		goto out_dput;
+@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+ 	if (error)
+ 		goto out_drop_write;
+ 	error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
++	if (!error)
++		gr_handle_create(new_dentry, nd.path.mnt);
+ out_drop_write:
+ 	mnt_drop_write(nd.path.mnt);
+ out_dput:
+@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+ 	char *to;
+ 	int error;
+ 
++	pax_track_stack();
++
+ 	error = user_path_parent(olddfd, oldname, &oldnd, &from);
+ 	if (error)
+ 		goto exit;
+@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+ 	if (new_dentry == trap)
+ 		goto exit5;
+ 
++	error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
++				     old_dentry, old_dir->d_inode, oldnd.path.mnt,
++				     to);
++	if (error)
++		goto exit5;
++
+ 	error = mnt_want_write(oldnd.path.mnt);
+ 	if (error)
+ 		goto exit5;
+@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+ 		goto exit6;
+ 	error = vfs_rename(old_dir->d_inode, old_dentry,
+ 				   new_dir->d_inode, new_dentry);
++	if (!error)
++		gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
++				 new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
+ exit6:
+ 	mnt_drop_write(oldnd.path.mnt);
+ exit5:
+@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+ 
+ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+ {
++	char tmpbuf[64];
++	const char *newlink;
+ 	int len;
+ 
+ 	len = PTR_ERR(link);
+@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+ 	len = strlen(link);
+ 	if (len > (unsigned) buflen)
+ 		len = buflen;
+-	if (copy_to_user(buffer, link, len))
++
++	if (len < sizeof(tmpbuf)) {
++		memcpy(tmpbuf, link, len);
++		newlink = tmpbuf;
++	} else
++		newlink = link;
++
++	if (copy_to_user(buffer, newlink, len))
+ 		len = -EFAULT;
+ out:
+ 	return len;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 2beb0fb..11a95a5 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
+ 		if (!(sb->s_flags & MS_RDONLY))
+ 			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+ 		up_write(&sb->s_umount);
++
++		gr_log_remount(mnt->mnt_devname, retval);
++
+ 		return retval;
+ 	}
+ 
+@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
+ 	security_sb_umount_busy(mnt);
+ 	up_write(&namespace_sem);
+ 	release_mounts(&umount_list);
++
++	gr_log_unmount(mnt->mnt_devname, retval);
++
+ 	return retval;
+ }
+ 
+@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
+ 	if (retval)
+ 		goto dput_out;
+ 
++	if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
++		retval = -EPERM;
++		goto dput_out;
++	}
++
++	if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
++		retval = -EPERM;
++		goto dput_out;
++	}
++
+ 	if (flags & MS_REMOUNT)
+ 		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+ 				    data_page);
+@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
+ 				      dev_name, data_page);
+ dput_out:
+ 	path_put(&path);
++
++	gr_log_mount(dev_name, dir_name, retval);
++
+ 	return retval;
+ }
+ 
+@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ 		goto out1;
+ 	}
+ 
++	if (gr_handle_chroot_pivot()) {
++		error = -EPERM;
++		path_put(&old);
++		goto out1;
++	}
++
+ 	read_lock(&current->fs->lock);
+ 	root = current->fs->root;
+ 	path_get(&current->fs->root);
+diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
+index b8b5b30..2bd9ccb 100644
+--- a/fs/ncpfs/dir.c
++++ b/fs/ncpfs/dir.c
+@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
+ 	int res, val = 0, len;
+ 	__u8 __name[NCP_MAXPATHLEN + 1];
+ 
++	pax_track_stack();
++
+ 	parent = dget_parent(dentry);
+ 	dir = parent->d_inode;
+ 
+@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
*dentry, struc + int error, res, len; + __u8 __name[NCP_MAXPATHLEN + 1]; + ++ pax_track_stack(); ++ + lock_kernel(); + error = -EIO; + if (!ncp_conn_valid(server)) +@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode, + int error, result, len; + int opmode; + __u8 __name[NCP_MAXPATHLEN + 1]; +- ++ + PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n", + dentry->d_parent->d_name.name, dentry->d_name.name, mode); + ++ pax_track_stack(); ++ + error = -EIO; + lock_kernel(); + if (!ncp_conn_valid(server)) +@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode) + int error, len; + __u8 __name[NCP_MAXPATHLEN + 1]; + ++ pax_track_stack(); ++ + DPRINTK("ncp_mkdir: making %s/%s\n", + dentry->d_parent->d_name.name, dentry->d_name.name); + +@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode) + if (!ncp_conn_valid(server)) + goto out; + ++ pax_track_stack(); ++ + ncp_age_dentry(server, dentry); + len = sizeof(__name); + error = ncp_io2vol(server, __name, &len, dentry->d_name.name, +@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry, + int old_len, new_len; + __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1]; + ++ pax_track_stack(); ++ + DPRINTK("ncp_rename: %s/%s to %s/%s\n", + old_dentry->d_parent->d_name.name, old_dentry->d_name.name, + new_dentry->d_parent->d_name.name, new_dentry->d_name.name); +diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c +index cf98da1..da890a9 100644 +--- a/fs/ncpfs/inode.c ++++ b/fs/ncpfs/inode.c +@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) + #endif + struct ncp_entry_info finfo; + ++ pax_track_stack(); ++ + data.wdog_pid = NULL; + server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL); + if (!server) +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index bfaef7b..e9d03ca 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode) + nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); + nfsi->attrtimeo_timestamp = jiffies; + +- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); ++ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf)); + if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) + nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; + else +@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt + return nfs_size_to_loff_t(fattr->size) > i_size_read(inode); + } + +-static atomic_long_t nfs_attr_generation_counter; ++static atomic_long_unchecked_t nfs_attr_generation_counter; + + static unsigned long nfs_read_attr_generation_counter(void) + { +- return atomic_long_read(&nfs_attr_generation_counter); ++ return atomic_long_read_unchecked(&nfs_attr_generation_counter); + } + + unsigned long nfs_inc_attr_generation_counter(void) + { +- return atomic_long_inc_return(&nfs_attr_generation_counter); ++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter); + } + + void nfs_fattr_init(struct nfs_fattr *fattr) +diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c +index cc2f505..f6a236f 100644 +--- a/fs/nfsd/lockd.c ++++ b/fs/nfsd/lockd.c +@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp) + fput(filp); + } + +-static struct nlmsvc_binding nfsd_nlm_ops = { ++static const struct nlmsvc_binding nfsd_nlm_ops = { + .fopen = nlm_fopen, 
/* open file for locking */ + .fclose = nlm_fclose, /* close file */ + }; +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index cfc3391..dcc083a 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + unsigned int cmd; + int err; + ++ pax_track_stack(); ++ + dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", + (long long) lock->lk_offset, + (long long) lock->lk_length); +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 4a82a96..0d5fb49 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, + struct nfsd4_compoundres *resp = rqstp->rq_resp; + u32 minorversion = resp->cstate.minorversion; + ++ pax_track_stack(); ++ + BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1); + BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion)); + BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion)); +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index 2e09588..596421d 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, + } else { + oldfs = get_fs(); + set_fs(KERNEL_DS); +- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset); ++ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset); + set_fs(oldfs); + } + +@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, + + /* Write the data. */ + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); ++ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset); + set_fs(oldfs); + if (host_err < 0) + goto out_nfserr; +@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) + */ + + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = inode->i_op->readlink(dentry, buf, *lenp); ++ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp); + set_fs(oldfs); + + if (host_err < 0) +diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c +index f6af760..d0adf34 100644 +--- a/fs/nilfs2/ioctl.c ++++ b/fs/nilfs2/ioctl.c +@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, + unsigned int cmd, void __user *argp) + { + struct nilfs_argv argv[5]; +- const static size_t argsz[5] = { ++ static const size_t argsz[5] = { + sizeof(struct nilfs_vdesc), + sizeof(struct nilfs_period), + sizeof(__u64), +@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, + if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment) + goto out_free; + ++ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size) ++ goto out_free; ++ + len = argv[n].v_size * argv[n].v_nmembs; + base = (void __user *)(unsigned long)argv[n].v_base; + if (len == 0) { +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c +index ad391a8..149a8a1 100644 +--- a/fs/nilfs2/the_nilfs.c ++++ b/fs/nilfs2/the_nilfs.c +@@ -478,6 +478,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, + brelse(sbh[1]); + sbh[1] = NULL; + sbp[1] = NULL; ++ valid[1] = 0; + swp = 0; + } + if (!valid[swp]) { +diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c +index 7e54e52..9337248 100644 +--- a/fs/notify/dnotify/dnotify.c ++++ b/fs/notify/dnotify/dnotify.c +@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry) + 
kmem_cache_free(dnotify_mark_entry_cache, dnentry); + } + +-static struct fsnotify_ops dnotify_fsnotify_ops = { ++static const struct fsnotify_ops dnotify_fsnotify_ops = { + .handle_event = dnotify_handle_event, + .should_send_event = dnotify_should_send_event, + .free_group_priv = NULL, +diff --git a/fs/notify/notification.c b/fs/notify/notification.c +index b8bf53b..c518688 100644 +--- a/fs/notify/notification.c ++++ b/fs/notify/notification.c +@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep; + * get set to 0 so it will never get 'freed' + */ + static struct fsnotify_event q_overflow_event; +-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); ++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0); + + /** + * fsnotify_get_cookie - return a unique cookie for use in synchronizing events. +@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); + */ + u32 fsnotify_get_cookie(void) + { +- return atomic_inc_return(&fsnotify_sync_cookie); ++ return atomic_inc_return_unchecked(&fsnotify_sync_cookie); + } + EXPORT_SYMBOL_GPL(fsnotify_get_cookie); + +diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c +index 5a9e344..0f8cd28 100644 +--- a/fs/ntfs/dir.c ++++ b/fs/ntfs/dir.c +@@ -1328,7 +1328,7 @@ find_next_index_buffer: + ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & + ~(s64)(ndir->itype.index.block_size - 1))); + /* Bounds checks. */ +- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { ++ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { + ntfs_error(sb, "Out of bounds check failed. Corrupt directory " + "inode 0x%lx or driver bug.", vdir->i_ino); + goto err_out; +diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c +index 663c0e3..b6868e9 100644 +--- a/fs/ntfs/file.c ++++ b/fs/ntfs/file.c +@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = { + #endif /* NTFS_RW */ + }; + +-const struct file_operations ntfs_empty_file_ops = {}; ++const struct file_operations ntfs_empty_file_ops __read_only; + +-const struct inode_operations ntfs_empty_inode_ops = {}; ++const struct inode_operations ntfs_empty_inode_ops __read_only; +diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c +index 1cd2934..880b5d2 100644 +--- a/fs/ocfs2/cluster/masklog.c ++++ b/fs/ocfs2/cluster/masklog.c +@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr, + return mlog_mask_store(mlog_attr->mask, buf, count); + } + +-static struct sysfs_ops mlog_attr_ops = { ++static const struct sysfs_ops mlog_attr_ops = { + .show = mlog_show, + .store = mlog_store, + }; +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c +index ac10f83..2cd2607 100644 +--- a/fs/ocfs2/localalloc.c ++++ b/fs/ocfs2/localalloc.c +@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, + goto bail; + } + +- atomic_inc(&osb->alloc_stats.moves); ++ atomic_inc_unchecked(&osb->alloc_stats.moves); + + status = 0; + bail: +diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c +index f010b22..9f9ed34 100644 +--- a/fs/ocfs2/namei.c ++++ b/fs/ocfs2/namei.c +@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir, + struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; + struct ocfs2_dir_lookup_result target_insert = { NULL, }; + ++ pax_track_stack(); ++ + /* At some point it might be nice to break this function up a + * bit. 
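[editor's note, annotation only, not part of the patch] The nilfs_ioctl_clean_segments hunk above is a genuine bug fix rather than a hardening annotation: it rejects v_nmembs values that would overflow the v_size * v_nmembs multiplication before the product is used as an allocation and copy length. A minimal sketch of the same guard, where alloc_array_checked() is our illustrative name and not a helper from this patch:

#include <linux/kernel.h>	/* UINT_MAX */
#include <linux/slab.h>		/* kmalloc */

static void *alloc_array_checked(unsigned int nmemb, unsigned int size)
{
	/* refuse to allocate if nmemb * size would wrap around */
	if (size && nmemb >= UINT_MAX / size)
		return NULL;
	return kmalloc(nmemb * size, GFP_KERNEL);
}

kcalloc() performs this overflow check internally, which is presumably why the fs/partitions/efi.c hunk later in this section replaces a kzalloc() of a precomputed product with kcalloc(nmemb, size, ...). [end note]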
*/ + +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h +index d963d86..914cfbd 100644 +--- a/fs/ocfs2/ocfs2.h ++++ b/fs/ocfs2/ocfs2.h +@@ -217,11 +217,11 @@ enum ocfs2_vol_state + + struct ocfs2_alloc_stats + { +- atomic_t moves; +- atomic_t local_data; +- atomic_t bitmap_data; +- atomic_t bg_allocs; +- atomic_t bg_extends; ++ atomic_unchecked_t moves; ++ atomic_unchecked_t local_data; ++ atomic_unchecked_t bitmap_data; ++ atomic_unchecked_t bg_allocs; ++ atomic_unchecked_t bg_extends; + }; + + enum ocfs2_local_alloc_state +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c +index 79b5dac..d322952 100644 +--- a/fs/ocfs2/suballoc.c ++++ b/fs/ocfs2/suballoc.c +@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, + mlog_errno(status); + goto bail; + } +- atomic_inc(&osb->alloc_stats.bg_extends); ++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends); + + /* You should never ask for this much metadata */ + BUG_ON(bits_wanted > +@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb, + mlog_errno(status); + goto bail; + } +- atomic_inc(&osb->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs); + + *blkno_start = bg_blkno + (u64) *suballoc_bit_start; + ac->ac_bits_given += (*num_bits); +@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb, + mlog_errno(status); + goto bail; + } +- atomic_inc(&osb->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs); + + BUG_ON(num_bits != 1); + +@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb, + cluster_start, + num_clusters); + if (!status) +- atomic_inc(&osb->alloc_stats.local_data); ++ atomic_inc_unchecked(&osb->alloc_stats.local_data); + } else { + if (min_clusters > (osb->bitmap_cpg - 1)) { + /* The only paths asking for contiguousness +@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb, + ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode, + bg_blkno, + bg_bit_off); +- atomic_inc(&osb->alloc_stats.bitmap_data); ++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data); + } + } + if (status < 0) { +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c +index 9f55be4..a3f8048 100644 +--- a/fs/ocfs2/super.c ++++ b/fs/ocfs2/super.c +@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) + "%10s => GlobalAllocs: %d LocalAllocs: %d " + "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n", + "Stats", +- atomic_read(&osb->alloc_stats.bitmap_data), +- atomic_read(&osb->alloc_stats.local_data), +- atomic_read(&osb->alloc_stats.bg_allocs), +- atomic_read(&osb->alloc_stats.moves), +- atomic_read(&osb->alloc_stats.bg_extends)); ++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data), ++ atomic_read_unchecked(&osb->alloc_stats.local_data), ++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs), ++ atomic_read_unchecked(&osb->alloc_stats.moves), ++ atomic_read_unchecked(&osb->alloc_stats.bg_extends)); + + out += snprintf(buf + out, len - out, + "%10s => State: %u Descriptor: %llu Size: %u bits " +@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb, + spin_lock_init(&osb->osb_xattr_lock); + ocfs2_init_inode_steal_slot(osb); + +- atomic_set(&osb->alloc_stats.moves, 0); +- atomic_set(&osb->alloc_stats.local_data, 0); +- atomic_set(&osb->alloc_stats.bitmap_data, 0); +- atomic_set(&osb->alloc_stats.bg_allocs, 0); +- atomic_set(&osb->alloc_stats.bg_extends, 0); ++ atomic_set_unchecked(&osb->alloc_stats.moves, 0); ++ atomic_set_unchecked(&osb->alloc_stats.local_data, 
0); ++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0); + + /* Copy the blockcheck stats from the superblock probe */ + osb->osb_ecc_stats = *stats; +diff --git a/fs/open.c b/fs/open.c +index 4f01e06..2a8057a 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) + error = locks_verify_truncate(inode, NULL, length); + if (!error) + error = security_path_truncate(&path, length, 0); ++ ++ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt)) ++ error = -EACCES; ++ + if (!error) { + vfs_dq_init(inode); + error = do_truncate(path.dentry, length, 0, NULL); +@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode) + if (__mnt_is_readonly(path.mnt)) + res = -EROFS; + ++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode)) ++ res = -EACCES; ++ + out_path_release: + path_put(&path); + out: +@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename) + if (error) + goto dput_and_out; + ++ gr_log_chdir(path.dentry, path.mnt); ++ + set_fs_pwd(current->fs, &path); + + dput_and_out: +@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd) + goto out_putf; + + error = inode_permission(inode, MAY_EXEC | MAY_ACCESS); ++ ++ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt)) ++ error = -EPERM; ++ ++ if (!error) ++ gr_log_chdir(file->f_path.dentry, file->f_path.mnt); ++ + if (!error) + set_fs_pwd(current->fs, &file->f_path); + out_putf: +@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename) + if (!capable(CAP_SYS_CHROOT)) + goto dput_and_out; + ++ if (gr_handle_chroot_chroot(path.dentry, path.mnt)) ++ goto dput_and_out; ++ + set_fs_root(current->fs, &path); ++ ++ gr_handle_chroot_chdir(&path); ++ + error = 0; + dput_and_out: + path_put(&path); +@@ -596,66 +618,57 @@ out: + return error; + } + ++static int chmod_common(struct path *path, umode_t mode) ++{ ++ struct inode *inode = path->dentry->d_inode; ++ struct iattr newattrs; ++ int error; ++ ++ error = mnt_want_write(path->mnt); ++ if (error) ++ return error; ++ mutex_lock(&inode->i_mutex); ++ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) { ++ error = -EACCES; ++ goto out_unlock; ++ } ++ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) { ++ error = -EPERM; ++ goto out_unlock; ++ } ++ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); ++ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; ++ error = notify_change(path->dentry, &newattrs); ++out_unlock: ++ mutex_unlock(&inode->i_mutex); ++ mnt_drop_write(path->mnt); ++ return error; ++} ++ + SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode) + { +- struct inode * inode; +- struct dentry * dentry; + struct file * file; + int err = -EBADF; +- struct iattr newattrs; + + file = fget(fd); +- if (!file) +- goto out; +- +- dentry = file->f_path.dentry; +- inode = dentry->d_inode; +- +- audit_inode(NULL, dentry); +- +- err = mnt_want_write_file(file); +- if (err) +- goto out_putf; +- mutex_lock(&inode->i_mutex); +- if (mode == (mode_t) -1) +- mode = inode->i_mode; +- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); +- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; +- err = notify_change(dentry, &newattrs); +- mutex_unlock(&inode->i_mutex); +- mnt_drop_write(file->f_path.mnt); +-out_putf: +- fput(file); +-out: ++ if (file) { ++ 
audit_inode(NULL, file->f_path.dentry); ++ err = chmod_common(&file->f_path, mode); ++ fput(file); ++ } + return err; + } + + SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode) + { + struct path path; +- struct inode *inode; + int error; +- struct iattr newattrs; + + error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path); +- if (error) +- goto out; +- inode = path.dentry->d_inode; +- +- error = mnt_want_write(path.mnt); +- if (error) +- goto dput_and_out; +- mutex_lock(&inode->i_mutex); +- if (mode == (mode_t) -1) +- mode = inode->i_mode; +- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); +- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; +- error = notify_change(path.dentry, &newattrs); +- mutex_unlock(&inode->i_mutex); +- mnt_drop_write(path.mnt); +-dput_and_out: +- path_put(&path); +-out: ++ if (!error) { ++ error = chmod_common(&path, mode); ++ path_put(&path); ++ } + return error; + } + +@@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode) + return sys_fchmodat(AT_FDCWD, filename, mode); + } + +-static int chown_common(struct dentry * dentry, uid_t user, gid_t group) ++static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt) + { + struct inode *inode = dentry->d_inode; + int error; + struct iattr newattrs; + ++ if (!gr_acl_handle_chown(dentry, mnt)) ++ return -EACCES; ++ + newattrs.ia_valid = ATTR_CTIME; + if (user != (uid_t) -1) { + newattrs.ia_valid |= ATTR_UID; +@@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) + error = mnt_want_write(path.mnt); + if (error) + goto out_release; +- error = chown_common(path.dentry, user, group); ++ error = chown_common(path.dentry, user, group, path.mnt); + mnt_drop_write(path.mnt); + out_release: + path_put(&path); +@@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, + error = mnt_want_write(path.mnt); + if (error) + goto out_release; +- error = chown_common(path.dentry, user, group); ++ error = chown_common(path.dentry, user, group, path.mnt); + mnt_drop_write(path.mnt); + out_release: + path_put(&path); +@@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group + error = mnt_want_write(path.mnt); + if (error) + goto out_release; +- error = chown_common(path.dentry, user, group); ++ error = chown_common(path.dentry, user, group, path.mnt); + mnt_drop_write(path.mnt); + out_release: + path_put(&path); +@@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group) + goto out_fput; + dentry = file->f_path.dentry; + audit_inode(NULL, dentry); +- error = chown_common(dentry, user, group); ++ error = chown_common(dentry, user, group, file->f_path.mnt); + mnt_drop_write(file->f_path.mnt); + out_fput: + fput(file); +@@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode) + if (!IS_ERR(tmp)) { + fd = get_unused_fd_flags(flags); + if (fd >= 0) { +- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0); ++ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0); + if (IS_ERR(f)) { + put_unused_fd(fd); + fd = PTR_ERR(f); +diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c +index 6ab70f4..f4103d1 100644 +--- a/fs/partitions/efi.c ++++ b/fs/partitions/efi.c +@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt) + if (!bdev || !gpt) + return NULL; + ++ if 
(!le32_to_cpu(gpt->num_partition_entries)) ++ return NULL; ++ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL); ++ if (!pte) ++ return NULL; ++ + count = le32_to_cpu(gpt->num_partition_entries) * + le32_to_cpu(gpt->sizeof_partition_entry); +- if (!count) +- return NULL; +- pte = kzalloc(count, GFP_KERNEL); +- if (!pte) +- return NULL; +- + if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba), + (u8 *) pte, + count) < count) { +diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c +index dd6efdb..3babc6c 100644 +--- a/fs/partitions/ldm.c ++++ b/fs/partitions/ldm.c +@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) + ldm_error ("A VBLK claims to have %d parts.", num); + return false; + } ++ + if (rec >= num) { + ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num); + return false; +@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) + goto found; + } + +- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL); ++ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL); + if (!f) { + ldm_crit ("Out of memory."); + return false; +diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c +index 5765198..7f8e9e0 100644 +--- a/fs/partitions/mac.c ++++ b/fs/partitions/mac.c +@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev) + return 0; /* not a MacOS disk */ + } + blocks_in_map = be32_to_cpu(part->map_count); +- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { +- put_dev_sector(sect); +- return 0; +- } + printk(" [mac]"); ++ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { ++ put_dev_sector(sect); ++ return 0; ++ } + for (slot = 1; slot <= blocks_in_map; ++slot) { + int pos = slot * secsize; + put_dev_sector(sect); +diff --git a/fs/pipe.c b/fs/pipe.c +index d0cc080..8a6f211 100644 +--- a/fs/pipe.c ++++ b/fs/pipe.c +@@ -401,9 +401,9 @@ redo: + } + if (bufs) /* More to do? */ + continue; +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + /* syscall merging: Usually we must not sleep + * if O_NONBLOCK is set, or if we got some data. + * But if a writer sleeps in kernel space, then +@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov, + mutex_lock(&inode->i_mutex); + pipe = inode->i_pipe; + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + goto out; +@@ -511,7 +511,7 @@ redo1: + for (;;) { + int bufs; + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -597,9 +597,9 @@ redo2: + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + do_wakeup = 0; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + out: + mutex_unlock(&inode->i_mutex); +@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait) + mask = 0; + if (filp->f_mode & FMODE_READ) { + mask = (nrbufs > 0) ? 
POLLIN | POLLRDNORM : 0; +- if (!pipe->writers && filp->f_version != pipe->w_counter) ++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter) + mask |= POLLHUP; + } + +@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait) + * Most Unices do not set POLLERR for FIFOs but on Linux they + * behave exactly like pipes for poll(). + */ +- if (!pipe->readers) ++ if (!atomic_read(&pipe->readers)) + mask |= POLLERR; + } + +@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw) + + mutex_lock(&inode->i_mutex); + pipe = inode->i_pipe; +- pipe->readers -= decr; +- pipe->writers -= decw; ++ atomic_sub(decr, &pipe->readers); ++ atomic_sub(decw, &pipe->writers); + +- if (!pipe->readers && !pipe->writers) { ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) { + free_pipe_info(inode); + } else { + wake_up_interruptible_sync(&pipe->wait); +@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp) + + if (inode->i_pipe) { + ret = 0; +- inode->i_pipe->readers++; ++ atomic_inc(&inode->i_pipe->readers); + } + + mutex_unlock(&inode->i_mutex); +@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp) + + if (inode->i_pipe) { + ret = 0; +- inode->i_pipe->writers++; ++ atomic_inc(&inode->i_pipe->writers); + } + + mutex_unlock(&inode->i_mutex); +@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp) + if (inode->i_pipe) { + ret = 0; + if (filp->f_mode & FMODE_READ) +- inode->i_pipe->readers++; ++ atomic_inc(&inode->i_pipe->readers); + if (filp->f_mode & FMODE_WRITE) +- inode->i_pipe->writers++; ++ atomic_inc(&inode->i_pipe->writers); + } + + mutex_unlock(&inode->i_mutex); +@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode) + inode->i_pipe = NULL; + } + +-static struct vfsmount *pipe_mnt __read_mostly; ++struct vfsmount *pipe_mnt __read_mostly; + static int pipefs_delete_dentry(struct dentry *dentry) + { + /* +@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void) + goto fail_iput; + inode->i_pipe = pipe; + +- pipe->readers = pipe->writers = 1; ++ atomic_set(&pipe->readers, 1); ++ atomic_set(&pipe->writers, 1); + inode->i_fop = &rdwr_pipefifo_fops; + + /* +diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig +index 50f8f06..c5755df 100644 +--- a/fs/proc/Kconfig ++++ b/fs/proc/Kconfig +@@ -30,12 +30,12 @@ config PROC_FS + + config PROC_KCORE + bool "/proc/kcore support" if !ARM +- depends on PROC_FS && MMU ++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD + + config PROC_VMCORE + bool "/proc/vmcore support (EXPERIMENTAL)" +- depends on PROC_FS && CRASH_DUMP +- default y ++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC ++ default n + help + Exports the dump image of crashed kernel in ELF format. + +@@ -59,8 +59,8 @@ config PROC_SYSCTL + limited in memory. 
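[editor's note, annotation only, not part of the patch] The fs/pipe.c hunks above convert the pipe reader/writer bookkeeping from plain int accesses to atomic_t operations (the matching pipe_inode_info field changes live elsewhere in this patch). The likely motivation is PaX's reference-count hardening: once these are checked atomics, an attacker can no longer overflow pipe->readers or pipe->writers into a premature free_pipe_info() and a use-after-free. A sketch of the pattern under that assumption, with the example_* names being ours:

#include <asm/atomic.h>

struct example_pipe_ends {
	atomic_t readers;
	atomic_t writers;
};

static void example_open_reader(struct example_pipe_ends *p)
{
	atomic_inc(&p->readers);	/* overflow-checked under PAX_REFCOUNT */
}

static int example_hung_up(struct example_pipe_ends *p)
{
	return !atomic_read(&p->readers) && !atomic_read(&p->writers);
}

[end note]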
+ + config PROC_PAGE_MONITOR +- default y +- depends on PROC_FS && MMU ++ default n ++ depends on PROC_FS && MMU && !GRKERNSEC + bool "Enable /proc page monitoring" if EMBEDDED + help + Various /proc files exist to monitor process memory utilization: +diff --git a/fs/proc/array.c b/fs/proc/array.c +index c5ef152..28c94f7 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -60,6 +60,7 @@ + #include <linux/tty.h> + #include <linux/string.h> + #include <linux/mman.h> ++#include <linux/grsecurity.h> + #include <linux/proc_fs.h> + #include <linux/ioport.h> + #include <linux/uaccess.h> +@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m, + p->nivcsw); + } + ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline void task_pax(struct seq_file *m, struct task_struct *p) ++{ ++ if (p->mm) ++ seq_printf(m, "PaX:\t%c%c%c%c%c\n", ++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p', ++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e', ++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm', ++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r', ++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's'); ++ else ++ seq_printf(m, "PaX:\t-----\n"); ++} ++#endif ++ + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) + { +@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + task_cap(m, task); + cpuset_task_status_allowed(m, task); + task_context_switch_counts(m, task); ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ task_pax(m, task); ++#endif ++ ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++ task_grsec_rbac(m, task); ++#endif ++ + return 0; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task, int whole) + { +@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + cputime_t cutime, cstime, utime, stime; + cputime_t cgtime, gtime; + unsigned long rsslim = 0; +- char tcomm[sizeof(task->comm)]; ++ char tcomm[sizeof(task->comm)] = { 0 }; + unsigned long flags; + ++ pax_track_stack(); ++ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("stat"); ++ return 0; ++ } ++#endif ++ + state = *get_task_state(task); + vsize = eip = esp = 0; + permitted = ptrace_may_access(task, PTRACE_MODE_READ); +@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + gtime = task_gtime(task); + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (PAX_RAND_FLAGS(mm)) { ++ eip = 0; ++ esp = 0; ++ wchan = 0; ++ } ++#endif ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ wchan = 0; ++ eip =0; ++ esp =0; ++#endif ++ + /* scale priority and nice values from timeslices to -20..20 */ + /* to make it look like a "normal" Unix priority/nice value */ + priority = task_prio(task); +@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + vsize, + mm ? get_mm_rss(mm) : 0, + rsslim, ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0), ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0), ++ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0), ++#else + mm ? (permitted ? 
mm->start_code : 1) : 0, + mm ? (permitted ? mm->end_code : 1) : 0, + (permitted && mm) ? mm->start_stack : 0, ++#endif + esp, + eip, + /* The signal information here is obsolete. +@@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) + { + int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0; +- struct mm_struct *mm = get_task_mm(task); ++ struct mm_struct *mm; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("statm"); ++ return 0; ++ } ++#endif ++ ++ mm = get_task_mm(task); + if (mm) { + size = task_statm(mm, &shared, &text, &data, &resident); + mmput(mm); +@@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + + return 0; + } ++ ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++int proc_pid_ipaddr(struct task_struct *task, char *buffer) ++{ ++ u32 curr_ip = 0; ++ unsigned long flags; ++ ++ if (lock_task_sighand(task, &flags)) { ++ curr_ip = task->signal->curr_ip; ++ unlock_task_sighand(task, &flags); ++ } ++ ++ return sprintf(buffer, "%pI4\n", &curr_ip); ++} ++#endif +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 67f7dc0..a86ad9a 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -102,6 +102,22 @@ struct pid_entry { + union proc_op op; + }; + ++struct getdents_callback { ++ struct linux_dirent __user * current_dir; ++ struct linux_dirent __user * previous; ++ struct file * file; ++ int count; ++ int error; ++}; ++ ++static int gr_fake_filldir(void * __buf, const char *name, int namlen, ++ loff_t offset, u64 ino, unsigned int d_type) ++{ ++ struct getdents_callback * buf = (struct getdents_callback *) __buf; ++ buf->error = -EINVAL; ++ return 0; ++} ++ + #define NOD(NAME, MODE, IOP, FOP, OP) { \ + .name = (NAME), \ + .len = sizeof(NAME) - 1, \ +@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task) + if (task == current) + return 0; + ++ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task)) ++ return -EPERM; ++ + /* + * If current is actively ptrace'ing, and would also be + * permitted to freshly attach with ptrace now, permit it. +@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer) + if (!mm->arg_end) + goto out_mm; /* Shh! No looking before we're done */ + ++ if (gr_acl_handle_procpidmem(task)) ++ goto out_mm; ++ + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) +@@ -287,12 +309,28 @@ out: + return res; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int proc_pid_auxv(struct task_struct *task, char *buffer) + { + int res = 0; + struct mm_struct *mm = get_task_mm(task); + if (mm) { + unsigned int nwords = 0; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* allow if we're currently ptracing this task */ ++ if (PAX_RAND_FLAGS(mm) && ++ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) { ++ mmput(mm); ++ return 0; ++ } ++#endif ++ + do { + nwords += 2; + } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ +@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer) + } + + +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + /* + * Provides a wchan file via kallsyms in a proper one-value-per-file format. + * Returns the resolved symbol. If that fails, simply return the address. 
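[editor's note, annotation only, not part of the patch] The do_task_stat() hunks above show the two-sided /proc hardening applied throughout fs/proc in this section: under GRKERNSEC_PROC_MEMMAP, addresses of a randomized mm are reported as constants so ASLR layout cannot be probed through /proc/pid/stat, and under GRKERNSEC_HIDESYM the eip/esp/wchan values are zeroed outright. The core is simple conditional masking, along the lines of this sketch (example_masked() is our name; PAX_RAND_FLAGS() is the macro defined in the hunk above):

#define example_masked(mm, val) \
	(PAX_RAND_FLAGS(mm) ? 0UL : (unsigned long)(val))

/* usage, mirroring the seq_printf() arguments patched above:
 *	seq_printf(m, "%lu ", example_masked(mm, mm->start_stack));
 */

[end note]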
+@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task) + mutex_unlock(&task->cred_guard_mutex); + } + +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + + #define MAX_STACK_TRACE_DEPTH 64 + +@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer) + return count; + } + +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + static int proc_pid_syscall(struct task_struct *task, char *buffer) + { + long nr; +@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer) + /************************************************************************/ + + /* permission checks */ +-static int proc_fd_access_allowed(struct inode *inode) ++static int proc_fd_access_allowed(struct inode *inode, unsigned int log) + { + struct task_struct *task; + int allowed = 0; +@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode) + */ + task = get_proc_task(inode); + if (task) { +- allowed = ptrace_may_access(task, PTRACE_MODE_READ); ++ if (log) ++ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ); ++ else ++ allowed = ptrace_may_access(task, PTRACE_MODE_READ); + put_task_struct(task); + } + return allowed; +@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = { + static int mem_open(struct inode* inode, struct file* file) + { + file->private_data = (void*)((long)current->self_exec_id); ++ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ file->f_version = current->exec_id; ++#endif ++ + return 0; + } + ++static int task_dumpable(struct task_struct *task); ++ + static ssize_t mem_read(struct file * file, char __user * buf, + size_t count, loff_t *ppos) + { +@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf, + int ret = -ESRCH; + struct mm_struct *mm; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (file->f_version != current->exec_id) { ++ gr_log_badprocpid("mem"); ++ return 0; ++ } ++#endif ++ + if (!task) + goto out_no_task; + +@@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf, + if (!task) + goto out_no_task; + ++ if (gr_acl_handle_procpidmem(task)) ++ goto out; ++ + if (!ptrace_may_access(task, PTRACE_MODE_READ)) + goto out; + +@@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) + path_put(&nd->path); + + /* Are we allowed to snoop on the tasks file descriptors? */ +- if (!proc_fd_access_allowed(inode)) ++ if (!proc_fd_access_allowed(inode,0)) + goto out; + + error = PROC_I(inode)->op.proc_get_link(inode, &nd->path); +@@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b + struct path path; + + /* Are we allowed to snoop on the tasks file descriptors? 
*/ +- if (!proc_fd_access_allowed(inode)) +- goto out; ++ /* logging this is needed for learning on chromium to work properly, ++ but we don't want to flood the logs from 'ps' which does a readlink ++ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn ++ CAP_SYS_PTRACE as it's not necessary for its basic functionality ++ */ ++ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') { ++ if (!proc_fd_access_allowed(inode,0)) ++ goto out; ++ } else { ++ if (!proc_fd_access_allowed(inode,1)) ++ goto out; ++ } + + error = PROC_I(inode)->op.proc_get_link(inode, &path); + if (error) +@@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } + security_task_to_inode(task, inode); +@@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat + struct inode *inode = dentry->d_inode; + struct task_struct *task; + const struct cred *cred; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *tmpcred = current_cred(); ++#endif + + generic_fillattr(inode, stat); + +@@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat + stat->uid = 0; + stat->gid = 0; + task = pid_task(proc_pid(inode), PIDTYPE_PID); ++ ++ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ + if (task) { ++ cred = __task_cred(task); ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (!tmpcred->uid || (tmpcred->uid == cred->uid) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ || in_group_p(CONFIG_GRKERNSEC_PROC_GID) ++#endif ++ ) { ++#endif + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { +- cred = __task_cred(task); + stat->uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ stat->gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + stat->gid = cred->egid; ++#endif + } ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ } else { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++#endif + } + rcu_read_unlock(); + return 0; +@@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) + + if (task) { + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } else { + inode->i_uid = 0; +@@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info) + int fd = proc_fd(inode); + + if (task) { +- files = get_files_struct(task); ++ if (!gr_acl_handle_procpidmem(task)) ++ files = 
get_files_struct(task); + put_task_struct(task); + } + if (files) { +@@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = { + static int proc_fd_permission(struct inode *inode, int mask) + { + int rv; ++ struct task_struct *task; + + rv = generic_permission(inode, mask, NULL); +- if (rv == 0) +- return 0; ++ + if (task_pid(current) == proc_pid(inode)) + rv = 0; ++ ++ task = get_proc_task(inode); ++ if (task == NULL) ++ return rv; ++ ++ if (gr_acl_handle_procpidmem(task)) ++ rv = -EACCES; ++ ++ put_task_struct(task); ++ + return rv; + } + +@@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir, + if (!task) + goto out_no_task; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + /* + * Yes, it does not scale. And it should not. Don't add + * new entries into /proc/<tgid>/ without very good reasons. +@@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp, + if (!task) + goto out_no_task; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + ret = 0; + i = filp->f_pos; + switch (i) { +@@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) + static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + __putname(s); + } +@@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_SCHED_DEBUG + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), + #endif +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUGO, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_SECURITY + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), + #endif +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUGO, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +@@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_TASK_IO_ACCOUNTING + INF("io", S_IRUSR, proc_tgid_io_accounting), + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr), ++#endif + }; + + static int proc_tgid_base_readdir(struct file * filp, +@@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir, + if (!inode) + goto out; + ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP; ++#else + inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; ++#endif + inode->i_op = &proc_tgid_base_inode_operations; + inode->i_fop = &proc_tgid_base_operations; + inode->i_flags|=S_IMMUTABLE; +@@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct + if (!task) + goto out; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out_put_task; ++ + result = proc_pid_instantiate(dir, dentry, task, NULL); ++out_put_task: + put_task_struct(task); + out: + return result; +@@ -2842,6 
+2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) + { + unsigned int nr; + struct task_struct *reaper; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *tmpcred = current_cred(); ++ const struct cred *itercred; ++#endif ++ filldir_t __filldir = filldir; + struct tgid_iter iter; + struct pid_namespace *ns; + +@@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) + for (iter = next_tgid(ns, iter); + iter.task; + iter.tgid += 1, iter = next_tgid(ns, iter)) { ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ rcu_read_lock(); ++ itercred = __task_cred(iter.task); ++#endif ++ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task) ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ || (tmpcred->uid && (itercred->uid != tmpcred->uid) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID) ++#endif ++ ) ++#endif ++ ) ++ __filldir = &gr_fake_filldir; ++ else ++ __filldir = filldir; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ rcu_read_unlock(); ++#endif + filp->f_pos = iter.tgid + TGID_OFFSET; +- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) { ++ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { + put_task_struct(iter.task); + goto out; + } +@@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = { + #ifdef CONFIG_SCHED_DEBUG + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), + #endif +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUGO, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = { + #ifdef CONFIG_SECURITY + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), + #endif +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUGO, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c +index 82676e3..5f8518a 100644 +--- a/fs/proc/cmdline.c ++++ b/fs/proc/cmdline.c +@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = { + + static int __init proc_cmdline_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops); ++#else + proc_create("cmdline", 0, NULL, &cmdline_proc_fops); ++#endif + return 0; + } + module_init(proc_cmdline_init); +diff --git a/fs/proc/devices.c b/fs/proc/devices.c +index 59ee7da..469b4b6 100644 +--- a/fs/proc/devices.c ++++ b/fs/proc/devices.c +@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = { + + static int __init proc_devices_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations); ++#else + proc_create("devices", 0, NULL, &proc_devinfo_operations); ++#endif + return 0; + } + module_init(proc_devices_init); +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index d78ade3..81767f9 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -18,12 +18,19 @@ + #include <linux/module.h> + #include 
<linux/smp_lock.h> + #include <linux/sysctl.h> ++#include <linux/grsecurity.h> + + #include <asm/system.h> + #include <asm/uaccess.h> + + #include "internal.h" + ++#ifdef CONFIG_PROC_SYSCTL ++extern const struct inode_operations proc_sys_inode_operations; ++extern const struct inode_operations proc_sys_dir_operations; ++#endif ++ ++ + struct proc_dir_entry *de_get(struct proc_dir_entry *de) + { + atomic_inc(&de->count); +@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode) + de_put(de); + if (PROC_I(inode)->sysctl) + sysctl_head_put(PROC_I(inode)->sysctl); ++ ++#ifdef CONFIG_PROC_SYSCTL ++ if (inode->i_op == &proc_sys_inode_operations || ++ inode->i_op == &proc_sys_dir_operations) ++ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev); ++#endif ++ + clear_inode(inode); + } + +@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, + if (de->mode) { + inode->i_mode = de->mode; + inode->i_uid = de->uid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = de->gid; ++#endif + } + if (de->size) + inode->i_size = de->size; +diff --git a/fs/proc/internal.h b/fs/proc/internal.h +index 753ca37..26bcf3b 100644 +--- a/fs/proc/internal.h ++++ b/fs/proc/internal.h +@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); + extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer); ++#endif + extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); + + extern const struct file_operations proc_maps_operations; +diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c +index b442dac..aab29cb 100644 +--- a/fs/proc/kcore.c ++++ b/fs/proc/kcore.c +@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) + off_t offset = 0; + struct kcore_list *m; + ++ pax_track_stack(); ++ + /* setup ELF header */ + elf = (struct elfhdr *) bufp; + bufp += sizeof(struct elfhdr); +@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + * the addresses in the elf_phdr on our list. + */ + start = kc_offset_to_vaddr(*fpos - elf_buflen); +- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) ++ tsz = PAGE_SIZE - (start & ~PAGE_MASK); ++ if (tsz > buflen) + tsz = buflen; +- ++ + while (buflen) { + struct kcore_list *m; + +@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + kfree(elf_buf); + } else { + if (kern_addr_valid(start)) { +- unsigned long n; ++ char *elf_buf; ++ mm_segment_t oldfs; + +- n = copy_to_user(buffer, (char *)start, tsz); +- /* +- * We cannot distingush between fault on source +- * and fault on destination. When this happens +- * we clear too and hope it will trigger the +- * EFAULT again. 
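[editor's note, annotation only, not part of the patch] The fs/proc/kcore.c hunk that begins above and completes just below replaces the direct copy_to_user() from an arbitrary kernel virtual address with a kmalloc'ed bounce buffer filled under KERNEL_DS. A fault on the kernel source address is then detected on its own, instead of being conflated with a fault on the user destination. A condensed sketch of that approach (function name ours; the 2.6.32-era get_fs()/set_fs() API is assumed):

#include <linux/slab.h>
#include <asm/uaccess.h>

static long example_read_kernel_range(char __user *ubuf,
				      unsigned long start, size_t tsz)
{
	mm_segment_t oldfs;
	char *kbuf = kmalloc(tsz, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (!__copy_from_user(kbuf, (const void __user *)start, tsz)) {
		set_fs(oldfs);
		if (copy_to_user(ubuf, kbuf, tsz)) {
			kfree(kbuf);
			return -EFAULT;
		}
	} else {
		/* source faulted: skip the copy, as the hunk below does */
		set_fs(oldfs);
	}
	kfree(kbuf);
	return 0;
}

[end note]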
+- */ +- if (n) { +- if (clear_user(buffer + tsz - n, +- n)) ++ elf_buf = kmalloc(tsz, GFP_KERNEL); ++ if (!elf_buf) ++ return -ENOMEM; ++ oldfs = get_fs(); ++ set_fs(KERNEL_DS); ++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) { ++ set_fs(oldfs); ++ if (copy_to_user(buffer, elf_buf, tsz)) { ++ kfree(elf_buf); + return -EFAULT; ++ } + } ++ set_fs(oldfs); ++ kfree(elf_buf); + } else { + if (clear_user(buffer, tsz)) + return -EFAULT; +@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + + static int open_kcore(struct inode *inode, struct file *filp) + { ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ return -EPERM; ++#endif + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (kcore_need_update) +diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c +index 7ca7834..cfe90a4 100644 +--- a/fs/proc/kmsg.c ++++ b/fs/proc/kmsg.c +@@ -12,37 +12,37 @@ + #include <linux/poll.h> + #include <linux/proc_fs.h> + #include <linux/fs.h> ++#include <linux/syslog.h> + + #include <asm/uaccess.h> + #include <asm/io.h> + + extern wait_queue_head_t log_wait; + +-extern int do_syslog(int type, char __user *bug, int count); +- + static int kmsg_open(struct inode * inode, struct file * file) + { +- return do_syslog(1,NULL,0); ++ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE); + } + + static int kmsg_release(struct inode * inode, struct file * file) + { +- (void) do_syslog(0,NULL,0); ++ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE); + return 0; + } + + static ssize_t kmsg_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) + { +- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0)) ++ if ((file->f_flags & O_NONBLOCK) && ++ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) + return -EAGAIN; +- return do_syslog(2, buf, count); ++ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE); + } + + static unsigned int kmsg_poll(struct file *file, poll_table *wait) + { + poll_wait(file, &log_wait, wait); +- if (do_syslog(9, NULL, 0)) ++ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) + return POLLIN | POLLRDNORM; + return 0; + } +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c +index a65239c..ad1182a 100644 +--- a/fs/proc/meminfo.c ++++ b/fs/proc/meminfo.c +@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + unsigned long pages[NR_LRU_LISTS]; + int lru; + ++ pax_track_stack(); ++ + /* + * display in kilobytes. 
+ */ +@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + vmi.used >> 10, + vmi.largest_chunk >> 10 + #ifdef CONFIG_MEMORY_FAILURE +- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) ++ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10) + #endif + ); + +diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c +index 9fe7d7e..cdb62c9 100644 +--- a/fs/proc/nommu.c ++++ b/fs/proc/nommu.c +@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) + if (len < 1) + len = 1; + seq_printf(m, "%*c", len, ' '); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\\"); + } + + seq_putc(m, '\n'); +diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c +index 04d1270..25e1173 100644 +--- a/fs/proc/proc_net.c ++++ b/fs/proc/proc_net.c +@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir) + struct task_struct *task; + struct nsproxy *ns; + struct net *net = NULL; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred = current_cred(); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (cred->fsuid) ++ return net; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)) ++ return net; ++#endif + + rcu_read_lock(); + task = pid_task(proc_pid(dir), PIDTYPE_PID); +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index f667e8a..55f4d96 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -7,11 +7,13 @@ + #include <linux/security.h> + #include "internal.h" + ++extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op); ++ + static const struct dentry_operations proc_sys_dentry_operations; + static const struct file_operations proc_sys_file_operations; +-static const struct inode_operations proc_sys_inode_operations; ++const struct inode_operations proc_sys_inode_operations; + static const struct file_operations proc_sys_dir_file_operations; +-static const struct inode_operations proc_sys_dir_operations; ++const struct inode_operations proc_sys_dir_operations; + + static struct inode *proc_sys_make_inode(struct super_block *sb, + struct ctl_table_header *head, struct ctl_table *table) +@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, + if (!p) + goto out; + ++ if (gr_handle_sysctl(p, MAY_EXEC)) ++ goto out; ++ + err = ERR_PTR(-ENOMEM); + inode = proc_sys_make_inode(dir->i_sb, h ? 
h : head, p); + if (h) +@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, + + err = NULL; + dentry->d_op = &proc_sys_dentry_operations; ++ ++ gr_handle_proc_create(dentry, inode); ++ + d_add(dentry, inode); + + out: +@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent, + return -ENOMEM; + } else { + child->d_op = &proc_sys_dentry_operations; ++ ++ gr_handle_proc_create(child, inode); ++ + d_add(child, inode); + } + } else { +@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table, + if (*pos < file->f_pos) + continue; + ++ if (gr_handle_sysctl(table, 0)) ++ continue; ++ + res = proc_sys_fill_cache(file, dirent, filldir, head, table); + if (res) + return res; +@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct + if (IS_ERR(head)) + return PTR_ERR(head); + ++ if (table && gr_handle_sysctl(table, MAY_EXEC)) ++ return -ENOENT; ++ + generic_fillattr(inode, stat); + if (table) + stat->mode = (stat->mode & S_IFMT) | table->mode; +@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = { + }; + + static const struct file_operations proc_sys_dir_file_operations = { ++ .read = generic_read_dir, + .readdir = proc_sys_readdir, + .llseek = generic_file_llseek, + }; + +-static const struct inode_operations proc_sys_inode_operations = { ++const struct inode_operations proc_sys_inode_operations = { + .permission = proc_sys_permission, + .setattr = proc_sys_setattr, + .getattr = proc_sys_getattr, + }; + +-static const struct inode_operations proc_sys_dir_operations = { ++const struct inode_operations proc_sys_dir_operations = { + .lookup = proc_sys_lookup, + .permission = proc_sys_permission, + .setattr = proc_sys_setattr, +diff --git a/fs/proc/root.c b/fs/proc/root.c +index b080b79..d957e63 100644 +--- a/fs/proc/root.c ++++ b/fs/proc/root.c +@@ -134,7 +134,15 @@ void __init proc_root_init(void) + #ifdef CONFIG_PROC_DEVICETREE + proc_device_tree_init(); + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_mkdir("bus", NULL); ++#endif + proc_sys_init(); + } + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 3b7b82a..4b420b0 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -8,6 +8,7 @@ + #include <linux/mempolicy.h> + #include <linux/swap.h> + #include <linux/swapops.h> ++#include <linux/grsecurity.h> + + #include <asm/elf.h> + #include <asm/uaccess.h> +@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + "VmStk:\t%8lu kB\n" + "VmExe:\t%8lu kB\n" + "VmLib:\t%8lu kB\n" +- "VmPTE:\t%8lu kB\n", +- hiwater_vm << (PAGE_SHIFT-10), ++ "VmPTE:\t%8lu kB\n" ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ "CsBase:\t%8lx\nCsLim:\t%8lx\n" ++#endif ++ ++ ,hiwater_vm << (PAGE_SHIFT-10), + (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), + mm->locked_vm << (PAGE_SHIFT-10), + hiwater_rss << (PAGE_SHIFT-10), + total_rss << (PAGE_SHIFT-10), + data << (PAGE_SHIFT-10), + mm->stack_vm << (PAGE_SHIFT-10), text, lib, +- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10); ++ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10 ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ , mm->context.user_cs_base, mm->context.user_cs_limit ++#endif ++ ++ ); + } + + unsigned long task_vsize(struct mm_struct 
*mm) +@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v) + struct proc_maps_private *priv = m->private; + struct vm_area_struct *vma = v; + +- vma_stop(priv, vma); ++ if (!IS_ERR(vma)) ++ vma_stop(priv, vma); + if (priv->task) + put_task_struct(priv->task); + } +@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + { + struct mm_struct *mm = vma->vm_mm; +@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + int flags = vma->vm_flags; + unsigned long ino = 0; + unsigned long long pgoff = 0; +- unsigned long start; + dev_t dev = 0; + int len; + +@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; + } + +- /* We don't show the stack guard page in /proc/maps */ +- start = vma->vm_start; +- if (vma->vm_flags & VM_GROWSDOWN) +- if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) +- start += PAGE_SIZE; +- + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", +- start, ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start, ++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end, ++#else ++ vma->vm_start, + vma->vm_end, ++#endif + flags & VM_READ ? 'r' : '-', + flags & VM_WRITE ? 'w' : '-', + flags & VM_EXEC ? 'x' : '-', + flags & VM_MAYSHARE ? 's' : 'p', ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff, ++#else + pgoff, ++#endif + MAJOR(dev), MINOR(dev), ino, &len); + + /* +@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + */ + if (file) { + pad_len_spaces(m, len); +- seq_path(m, &file->f_path, "\n"); ++ seq_path(m, &file->f_path, "\n\\"); + } else { + const char *name = arch_vma_name(vma); + if (!name) { +@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + if (vma->vm_start <= mm->brk && + vma->vm_end >= mm->start_brk) { + name = "[heap]"; +- } else if (vma->vm_start <= mm->start_stack && +- vma->vm_end >= mm->start_stack) { ++ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) || ++ (vma->vm_start <= mm->start_stack && ++ vma->vm_end >= mm->start_stack)) { + name = "[stack]"; + } + } else { +@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v) + struct proc_maps_private *priv = m->private; + struct task_struct *task = priv->task; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("maps"); ++ return 0; ++ } ++#endif ++ + show_map_vma(m, vma); + + if (m->count < m->size) /* vma is copied successfully */ +@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v) + .private = &mss, + }; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("smaps"); ++ return 0; ++ } ++#endif + memset(&mss, 0, sizeof mss); +- mss.vma = vma; +- if (vma->vm_mm && !is_vm_hugetlb_page(vma)) +- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); ++ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!PAX_RAND_FLAGS(vma->vm_mm)) { ++#endif ++ mss.vma = vma; ++ if (vma->vm_mm && !is_vm_hugetlb_page(vma)) ++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); ++#ifdef 
CONFIG_GRKERNSEC_PROC_MEMMAP ++ } ++#endif + + show_map_vma(m, vma); + +@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v) + "Swap: %8lu kB\n" + "KernelPageSize: %8lu kB\n" + "MMUPageSize: %8lu kB\n", ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10, ++#else + (vma->vm_end - vma->vm_start) >> 10, ++#endif + mss.resident >> 10, + (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), + mss.shared_clean >> 10, +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c +index 8f5c05d..c99c76d 100644 +--- a/fs/proc/task_nommu.c ++++ b/fs/proc/task_nommu.c +@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + else + bytes += kobjsize(mm); + +- if (current->fs && current->fs->users > 1) ++ if (current->fs && atomic_read(¤t->fs->users) > 1) + sbytes += kobjsize(current->fs); + else + bytes += kobjsize(current->fs); +@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) + if (len < 1) + len = 1; + seq_printf(m, "%*c", len, ' '); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\\"); + } + + seq_putc(m, '\n'); +diff --git a/fs/readdir.c b/fs/readdir.c +index 7723401..30059a6 100644 +--- a/fs/readdir.c ++++ b/fs/readdir.c +@@ -16,6 +16,7 @@ + #include <linux/security.h> + #include <linux/syscalls.h> + #include <linux/unistd.h> ++#include <linux/namei.h> + + #include <asm/uaccess.h> + +@@ -67,6 +68,7 @@ struct old_linux_dirent { + + struct readdir_callback { + struct old_linux_dirent __user * dirent; ++ struct file * file; + int result; + }; + +@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset + buf->result = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + buf->result++; + dirent = buf->dirent; + if (!access_ok(VERIFY_WRITE, dirent, +@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, + + buf.result = 0; + buf.dirent = dirent; ++ buf.file = file; + + error = vfs_readdir(file, fillonedir, &buf); + if (buf.result) +@@ -142,6 +149,7 @@ struct linux_dirent { + struct getdents_callback { + struct linux_dirent __user * current_dir; + struct linux_dirent __user * previous; ++ struct file * file; + int count; + int error; + }; +@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset, + buf->error = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, + buf.previous = NULL; + buf.count = count; + buf.error = 0; ++ buf.file = file; + + error = vfs_readdir(file, filldir, &buf); + if (error >= 0) +@@ -228,6 +241,7 @@ out: + struct getdents_callback64 { + struct linux_dirent64 __user * current_dir; + struct linux_dirent64 __user * previous; ++ struct file *file; + int count; + int error; + }; +@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset, + buf->error = -EINVAL; /* only used if we fail.. 
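[editor's note, annotation only, not part of the patch] The fs/readdir.c hunks above, and the filldir64 hunk continuing below, all apply one technique: a struct file * is threaded into each getdents callback structure so a per-entry policy hook, gr_acl_handle_filldir(), can silently drop directory entries the caller may not see. Returning 0 from a filldir callback claims success without emitting the entry. A sketch of such a filtering shim, where example_entry_visible() stands in for the hook and is not a function from this patch:

#include <linux/fs.h>

struct example_filter_ctx {
	struct file *file;	/* threaded through, as in the hunks above */
	filldir_t real;
	void *real_buf;
};

static int example_entry_visible(struct file *file, const char *name,
				 int namlen, u64 ino);

static int example_filtered_filldir(void *__buf, const char *name, int namlen,
				    loff_t offset, u64 ino, unsigned int d_type)
{
	struct example_filter_ctx *ctx = __buf;

	if (!example_entry_visible(ctx->file, name, namlen, ino))
		return 0;	/* pretend success; the entry stays hidden */
	return ctx->real(ctx->real_buf, name, namlen, offset, ino, d_type);
}

[end note]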
*/ + if (reclen > buf->count) + return -EINVAL; ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, + + buf.current_dir = dirent; + buf.previous = NULL; ++ buf.file = file; + buf.count = count; + buf.error = 0; + +@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, + error = buf.error; + lastdirent = buf.previous; + if (lastdirent) { +- typeof(lastdirent->d_off) d_off = file->f_pos; ++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos; + if (__put_user(d_off, &lastdirent->d_off)) + error = -EFAULT; + else +diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c +index d42c30c..4fd8718 100644 +--- a/fs/reiserfs/dir.c ++++ b/fs/reiserfs/dir.c +@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, + struct reiserfs_dir_entry de; + int ret = 0; + ++ pax_track_stack(); ++ + reiserfs_write_lock(inode->i_sb); + + reiserfs_check_lock_depth(inode->i_sb, "readdir"); +diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c +index 128d3f7c..8840d44 100644 +--- a/fs/reiserfs/do_balan.c ++++ b/fs/reiserfs/do_balan.c +@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */ + return; + } + +- atomic_inc(&(fs_generation(tb->tb_sb))); ++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb))); + do_balance_starts(tb); + + /* balance leaf returns 0 except if combining L R and S into +diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c +index 72cb1cc..d0e3181 100644 +--- a/fs/reiserfs/item_ops.c ++++ b/fs/reiserfs/item_ops.c +@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi) + vi->vi_index, vi->vi_type, vi->vi_ih); + } + +-static struct item_operations stat_data_ops = { ++static const struct item_operations stat_data_ops = { + .bytes_number = sd_bytes_number, + .decrement_key = sd_decrement_key, + .is_left_mergeable = sd_is_left_mergeable, +@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi) + vi->vi_index, vi->vi_type, vi->vi_ih); + } + +-static struct item_operations direct_ops = { ++static const struct item_operations direct_ops = { + .bytes_number = direct_bytes_number, + .decrement_key = direct_decrement_key, + .is_left_mergeable = direct_is_left_mergeable, +@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi) + vi->vi_index, vi->vi_type, vi->vi_ih); + } + +-static struct item_operations indirect_ops = { ++static const struct item_operations indirect_ops = { + .bytes_number = indirect_bytes_number, + .decrement_key = indirect_decrement_key, + .is_left_mergeable = indirect_is_left_mergeable, +@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi) + printk("\n"); + } + +-static struct item_operations direntry_ops = { ++static const struct item_operations direntry_ops = { + .bytes_number = direntry_bytes_number, + .decrement_key = direntry_decrement_key, + .is_left_mergeable = direntry_is_left_mergeable, +@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi) + "Invalid item type observed, run fsck ASAP"); + } + +-static struct item_operations errcatch_ops = { ++static const struct item_operations errcatch_ops = { + errcatch_bytes_number, + errcatch_decrement_key, + errcatch_is_left_mergeable, +@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = { + #error Item types must use disk-format assigned values. 
+ #endif + +-struct item_operations *item_ops[TYPE_ANY + 1] = { ++const struct item_operations * const item_ops[TYPE_ANY + 1] = { + &stat_data_ops, + &indirect_ops, + &direct_ops, +diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c +index b5fe0aa..e0e25c4 100644 +--- a/fs/reiserfs/journal.c ++++ b/fs/reiserfs/journal.c +@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev, + struct buffer_head *bh; + int i, j; + ++ pax_track_stack(); ++ + bh = __getblk(dev, block, bufsize); + if (buffer_uptodate(bh)) + return (bh); +diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c +index 2715791..b8996db 100644 +--- a/fs/reiserfs/namei.c ++++ b/fs/reiserfs/namei.c +@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, + unsigned long savelink = 1; + struct timespec ctime; + ++ pax_track_stack(); ++ + /* three balancings: (1) old name removal, (2) new name insertion + and (3) maybe "save" link insertion + stat data updates: (1) old directory, +diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c +index 9229e55..3d2e3b7 100644 +--- a/fs/reiserfs/procfs.c ++++ b/fs/reiserfs/procfs.c +@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb) + "SMALL_TAILS " : "NO_TAILS ", + replay_only(sb) ? "REPLAY_ONLY " : "", + convert_reiserfs(sb) ? "CONV " : "", +- atomic_read(&r->s_generation_counter), ++ atomic_read_unchecked(&r->s_generation_counter), + SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), + SF(s_do_balance), SF(s_unneeded_left_neighbor), + SF(s_good_search_by_key_reada), SF(s_bmaps), +@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb) + struct journal_params *jp = &rs->s_v1.s_journal; + char b[BDEVNAME_SIZE]; + ++ pax_track_stack(); ++ + seq_printf(m, /* on-disk fields */ + "jp_journal_1st_block: \t%i\n" + "jp_journal_dev: \t%s[%x]\n" +diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c +index d036ee5..4c7dca1 100644 +--- a/fs/reiserfs/stree.c ++++ b/fs/reiserfs/stree.c +@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, + int iter = 0; + #endif + ++ pax_track_stack(); ++ + BUG_ON(!th->t_trans_id); + + init_tb_struct(th, &s_del_balance, sb, path, +@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, + int retval; + int quota_cut_bytes = 0; + ++ pax_track_stack(); ++ + BUG_ON(!th->t_trans_id); + + le_key2cpu_key(&cpu_key, key); +@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, + int quota_cut_bytes; + loff_t tail_pos = 0; + ++ pax_track_stack(); ++ + BUG_ON(!th->t_trans_id); + + init_tb_struct(th, &s_cut_balance, inode->i_sb, path, +@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree + int retval; + int fs_gen; + ++ pax_track_stack(); ++ + BUG_ON(!th->t_trans_id); + + fs_gen = get_generation(inode->i_sb); +@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, + int fs_gen = 0; + int quota_bytes = 0; + ++ pax_track_stack(); ++ + BUG_ON(!th->t_trans_id); + + if (inode) { /* Do we count quotas for item? 
*/ +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index 7cb1285..c726cd0 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin + {.option_name = NULL} + }; + ++ pax_track_stack(); ++ + *blocks = 0; + if (!options || !*options) + /* use default configuration: create tails, journaling on, no +diff --git a/fs/select.c b/fs/select.c +index fd38ce2..f5381b8 100644 +--- a/fs/select.c ++++ b/fs/select.c +@@ -20,6 +20,7 @@ + #include <linux/module.h> + #include <linux/slab.h> + #include <linux/poll.h> ++#include <linux/security.h> + #include <linux/personality.h> /* for STICKY_TIMEOUTS */ + #include <linux/file.h> + #include <linux/fdtable.h> +@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) + int retval, i, timed_out = 0; + unsigned long slack = 0; + ++ pax_track_stack(); ++ + rcu_read_lock(); + retval = max_select_fd(n, fds); + rcu_read_unlock(); +@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, + /* Allocate small arguments on the stack to save memory and be faster */ + long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; + ++ pax_track_stack(); ++ + ret = -EINVAL; + if (n < 0) + goto out_nofds; +@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, + struct poll_list *walk = head; + unsigned long todo = nfds; + ++ pax_track_stack(); ++ ++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1); + if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur) + return -EINVAL; + +diff --git a/fs/seq_file.c b/fs/seq_file.c +index eae7d9d..b7613c6 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -9,6 +9,7 @@ + #include <linux/module.h> + #include <linux/seq_file.h> + #include <linux/slab.h> ++#include <linux/sched.h> + + #include <asm/uaccess.h> + #include <asm/page.h> +@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op) + memset(p, 0, sizeof(*p)); + mutex_init(&p->lock); + p->op = op; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ p->exec_id = current->exec_id; ++#endif + + /* + * Wrappers around seq_open(e.g. swaps_open) need to be +@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v) + int single_open(struct file *file, int (*show)(struct seq_file *, void *), + void *data) + { +- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL); ++ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL); + int res = -ENOMEM; + + if (op) { +diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c +index 71c29b6..54694dd 100644 +--- a/fs/smbfs/proc.c ++++ b/fs/smbfs/proc.c +@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp) + + out: + if (server->local_nls != NULL && server->remote_nls != NULL) +- server->ops->convert = convert_cp; ++ *(void **)&server->ops->convert = convert_cp; + else +- server->ops->convert = convert_memcpy; ++ *(void **)&server->ops->convert = convert_memcpy; + + smb_unlock_server(server); + return n; +@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt) + + /* FIXME: the win9x code wants to modify these ... 
(seek/trunc bug) */ + if (server->mnt->flags & SMB_MOUNT_OLDATTR) { +- server->ops->getattr = smb_proc_getattr_core; ++ *(void **)&server->ops->getattr = smb_proc_getattr_core; + } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) { +- server->ops->getattr = smb_proc_getattr_ff; ++ *(void **)&server->ops->getattr = smb_proc_getattr_ff; + } + + /* Decode server capabilities */ +@@ -3439,7 +3439,7 @@ out: + static void + install_ops(struct smb_ops *dst, struct smb_ops *src) + { +- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC); ++ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC); + } + + /* < LANMAN2 */ +diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c +index 00b2909..2ace383 100644 +--- a/fs/smbfs/symlink.c ++++ b/fs/smbfs/symlink.c +@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd) + + static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + __putname(s); + } +diff --git a/fs/splice.c b/fs/splice.c +index bb92b7c5..5aa72b0 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + pipe_lock(pipe); + + for (;;) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + do_wakeup = 0; + } + +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, + .spd_release = spd_release_page, + }; + ++ pax_track_stack(); ++ + index = *ppos >> PAGE_CACHE_SHIFT; + loff = *ppos & ~PAGE_CACHE_MASK; + req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; +@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); ++ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos); + set_fs(old_fs); + + return res; +@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- res = vfs_write(file, (const char __user *)buf, count, &pos); ++ res = vfs_write(file, (const char __force_user *)buf, count, &pos); + set_fs(old_fs); + + return res; +@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, + .spd_release = spd_release_page, + }; + ++ pax_track_stack(); ++ + index = *ppos >> PAGE_CACHE_SHIFT; + offset = *ppos & ~PAGE_CACHE_MASK; + nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; +@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, + goto err; + + this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); +- vec[i].iov_base = (void __user *) page_address(page); ++ vec[i].iov_base = (__force void __user *) page_address(page); + vec[i].iov_len = this_len; + pages[i] = page; + spd.nr_pages++; +@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed); + int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) + { + while (!pipe->nrbufs) { +- if (!pipe->writers) ++ 
if (!atomic_read(&pipe->writers)) + return 0; + +- if (!pipe->waiting_writers && sd->num_spliced) ++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced) + return 0; + + if (sd->flags & SPLICE_F_NONBLOCK) +@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + * out of the pipe right after the splice_to_pipe(). So set + * PIPE_READERS appropriately. + */ +- pipe->readers = 1; ++ atomic_set(&pipe->readers, 1); + + current->splice_pipe = pipe; + } +@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, + .spd_release = spd_release_page, + }; + ++ pax_track_stack(); ++ + pipe = pipe_info(file->f_path.dentry->d_inode); + if (!pipe) + return -EBADF; +@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + ret = -ERESTARTSYS; + break; + } +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + if (flags & SPLICE_F_NONBLOCK) { + ret = -EAGAIN; + break; +@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + pipe_lock(pipe); + + while (pipe->nrbufs >= PIPE_BUFFERS) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + break; +@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + ret = -ERESTARTSYS; + break; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -1786,14 +1792,14 @@ retry: + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; + break; + } + +- if (!ipipe->nrbufs && !ipipe->writers) ++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers)) + break; + + /* +@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, + * return EAGAIN if we have the potential of some data in the + * future, otherwise just return 0 + */ +- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK)) + ret = -EAGAIN; + + pipe_unlock(ipipe); +diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c +index e020183..18d64b4 100644 +--- a/fs/sysfs/dir.c ++++ b/fs/sysfs/dir.c +@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd, + struct sysfs_dirent *sd; + int rc; + ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT ++ const char *parent_name = parent_sd->s_name; ++ ++ mode = S_IFDIR | S_IRWXU; ++ ++ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) || ++ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) || ++ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) || ++ (!strcmp(parent_name, "system") && !strcmp(name, "cpu"))) ++ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; ++#endif ++ + /* allocate */ + sd = sysfs_new_dirent(name, mode, SYSFS_DIR); + if (!sd) +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index 7118a38..70af853 100644 +--- 
a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock); + + struct sysfs_open_dirent { + atomic_t refcnt; +- atomic_t event; ++ atomic_unchecked_t event; + wait_queue_head_t poll; + struct list_head buffers; /* goes through sysfs_buffer.list */ + }; +@@ -53,7 +53,7 @@ struct sysfs_buffer { + size_t count; + loff_t pos; + char * page; +- struct sysfs_ops * ops; ++ const struct sysfs_ops * ops; + struct mutex mutex; + int needs_read_fill; + int event; +@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer + { + struct sysfs_dirent *attr_sd = dentry->d_fsdata; + struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; +- struct sysfs_ops * ops = buffer->ops; ++ const struct sysfs_ops * ops = buffer->ops; + int ret = 0; + ssize_t count; + +@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer + if (!sysfs_get_active_two(attr_sd)) + return -ENODEV; + +- buffer->event = atomic_read(&attr_sd->s_attr.open->event); ++ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event); + count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page); + + sysfs_put_active_two(attr_sd); +@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t + { + struct sysfs_dirent *attr_sd = dentry->d_fsdata; + struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; +- struct sysfs_ops * ops = buffer->ops; ++ const struct sysfs_ops * ops = buffer->ops; + int rc; + + /* need attr_sd for attr and ops, its parent for kobj */ +@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd, + return -ENOMEM; + + atomic_set(&new_od->refcnt, 0); +- atomic_set(&new_od->event, 1); ++ atomic_set_unchecked(&new_od->event, 1); + init_waitqueue_head(&new_od->poll); + INIT_LIST_HEAD(&new_od->buffers); + goto retry; +@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) + struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata; + struct kobject *kobj = attr_sd->s_parent->s_dir.kobj; + struct sysfs_buffer *buffer; +- struct sysfs_ops *ops; ++ const struct sysfs_ops *ops; + int error = -EACCES; + char *p; + +@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait) + + sysfs_put_active_two(attr_sd); + +- if (buffer->event != atomic_read(&od->event)) ++ if (buffer->event != atomic_read_unchecked(&od->event)) + goto trigger; + + return DEFAULT_POLLMASK; +@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd) + + od = sd->s_attr.open; + if (od) { +- atomic_inc(&od->event); ++ atomic_inc_unchecked(&od->event); + wake_up_interruptible(&od->poll); + } + +diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c +index c5081ad..342ea86 100644 +--- a/fs/sysfs/symlink.c ++++ b/fs/sysfs/symlink.c +@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd) + + static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) + { +- char *page = nd_get_link(nd); ++ const char *page = nd_get_link(nd); + if (!IS_ERR(page)) + free_page((unsigned long)page); + } +diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c +index 1e06853..b06d325 100644 +--- a/fs/udf/balloc.c ++++ b/fs/udf/balloc.c +@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb, + + mutex_lock(&sbi->s_alloc_mutex); + partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; +- if (bloc->logicalBlockNum < 0 || +- (bloc->logicalBlockNum + 
count) > +- partmap->s_partition_len) { ++ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) { + udf_debug("%d < %d || %d + %d > %d\n", + bloc->logicalBlockNum, 0, bloc->logicalBlockNum, + count, partmap->s_partition_len); +@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb, + + mutex_lock(&sbi->s_alloc_mutex); + partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; +- if (bloc->logicalBlockNum < 0 || +- (bloc->logicalBlockNum + count) > +- partmap->s_partition_len) { ++ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) { + udf_debug("%d < %d || %d + %d > %d\n", + bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, + partmap->s_partition_len); +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 6d24c2c..fff470f 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, + int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; + int lastblock = 0; + ++ pax_track_stack(); ++ + prev_epos.offset = udf_file_entry_alloc_offset(inode); + prev_epos.block = iinfo->i_location; + prev_epos.bh = NULL; +diff --git a/fs/udf/misc.c b/fs/udf/misc.c +index 9215700..bf1f68e 100644 +--- a/fs/udf/misc.c ++++ b/fs/udf/misc.c +@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, + + u8 udf_tag_checksum(const struct tag *t) + { +- u8 *data = (u8 *)t; ++ const u8 *data = (const u8 *)t; + u8 checksum = 0; + int i; + for (i = 0; i < sizeof(struct tag); ++i) +diff --git a/fs/utimes.c b/fs/utimes.c +index e4c75db..b4df0e0 100644 +--- a/fs/utimes.c ++++ b/fs/utimes.c +@@ -1,6 +1,7 @@ + #include <linux/compiler.h> + #include <linux/file.h> + #include <linux/fs.h> ++#include <linux/security.h> + #include <linux/linkage.h> + #include <linux/mount.h> + #include <linux/namei.h> +@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times) + goto mnt_drop_write_and_out; + } + } ++ ++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) { ++ error = -EACCES; ++ goto mnt_drop_write_and_out; ++ } ++ + mutex_lock(&inode->i_mutex); + error = notify_change(path->dentry, &newattrs); + mutex_unlock(&inode->i_mutex); +diff --git a/fs/xattr.c b/fs/xattr.c +index 6d4f6d3..cda3958 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr); + * Extended attribute SET operations + */ + static long +-setxattr(struct dentry *d, const char __user *name, const void __user *value, ++setxattr(struct path *path, const char __user *name, const void __user *value, + size_t size, int flags) + { + int error; +@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value, + return PTR_ERR(kvalue); + } + +- error = vfs_setxattr(d, kname, kvalue, size, flags); ++ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) { ++ error = -EACCES; ++ goto out; ++ } ++ ++ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags); ++out: + kfree(kvalue); + return error; + } +@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname, + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = setxattr(path.dentry, name, value, size, flags); ++ error = setxattr(&path, name, value, size, flags); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname, + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = setxattr(path.dentry, name, 
value, size, flags); ++ error = setxattr(&path, name, value, size, flags); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, + const void __user *,value, size_t, size, int, flags) + { + struct file *f; +- struct dentry *dentry; + int error = -EBADF; + + f = fget(fd); + if (!f) + return error; +- dentry = f->f_path.dentry; +- audit_inode(NULL, dentry); ++ audit_inode(NULL, f->f_path.dentry); + error = mnt_want_write_file(f); + if (!error) { +- error = setxattr(dentry, name, value, size, flags); ++ error = setxattr(&f->f_path, name, value, size, flags); + mnt_drop_write(f->f_path.mnt); + } + fput(f); +diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c +index c6ad7c7..f2847a7 100644 +--- a/fs/xattr_acl.c ++++ b/fs/xattr_acl.c +@@ -17,8 +17,8 @@ + struct posix_acl * + posix_acl_from_xattr(const void *value, size_t size) + { +- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value; +- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end; ++ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value; ++ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end; + int count; + struct posix_acl *acl; + struct posix_acl_entry *acl_e; +diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c +index 942362f..88f96f5 100644 +--- a/fs/xfs/linux-2.6/xfs_ioctl.c ++++ b/fs/xfs/linux-2.6/xfs_ioctl.c +@@ -134,7 +134,7 @@ xfs_find_handle( + } + + error = -EFAULT; +- if (copy_to_user(hreq->ohandle, &handle, hsize) || ++ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) || + copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) + goto out_put; + +@@ -423,7 +423,7 @@ xfs_attrlist_by_handle( + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + +- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); ++ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL); + if (!kbuf) + goto out_dput; + +@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1( + xfs_mount_t *mp, + void __user *arg) + { +- xfs_fsop_geom_t fsgeo; ++ xfs_fsop_geom_t fsgeo; + int error; + + error = xfs_fs_geometry(mp, &fsgeo, 3); +diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c +index bad485a..479bd32 100644 +--- a/fs/xfs/linux-2.6/xfs_ioctl32.c ++++ b/fs/xfs/linux-2.6/xfs_ioctl32.c +@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1( + xfs_fsop_geom_t fsgeo; + int error; + ++ memset(&fsgeo, 0, sizeof(fsgeo)); + error = xfs_fs_geometry(mp, &fsgeo, 3); + if (error) + return -error; +diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c +index 1f3b4b8..6102f6d 100644 +--- a/fs/xfs/linux-2.6/xfs_iops.c ++++ b/fs/xfs/linux-2.6/xfs_iops.c +@@ -468,7 +468,7 @@ xfs_vn_put_link( + struct nameidata *nd, + void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + + if (!IS_ERR(s)) + kfree(s); +diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c +index 8971fb0..5fc1eb2 100644 +--- a/fs/xfs/xfs_bmap.c ++++ b/fs/xfs/xfs_bmap.c +@@ -360,7 +360,7 @@ xfs_bmap_validate_ret( + int nmap, + int ret_nmap); + #else +-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) ++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0) + #endif /* DEBUG */ + + #if defined(XFS_RW_TRACE) +diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c +index e89734e..5e84d8d 100644 +--- a/fs/xfs/xfs_dir2_sf.c ++++ b/fs/xfs/xfs_dir2_sf.c +@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents( + } + + ino = xfs_dir2_sf_get_inumber(sfp, 
xfs_dir2_sf_inumberp(sfep)); +- if (filldir(dirent, sfep->name, sfep->namelen, ++ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) { ++ char name[sfep->namelen]; ++ memcpy(name, sfep->name, sfep->namelen); ++ if (filldir(dirent, name, sfep->namelen, ++ off & 0x7fffffff, ino, DT_UNKNOWN)) { ++ *offset = off & 0x7fffffff; ++ return 0; ++ } ++ } else if (filldir(dirent, sfep->name, sfep->namelen, + off & 0x7fffffff, ino, DT_UNKNOWN)) { + *offset = off & 0x7fffffff; + return 0; +diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c +index 8f32f50..b6a41e8 100644 +--- a/fs/xfs/xfs_vnodeops.c ++++ b/fs/xfs/xfs_vnodeops.c +@@ -564,13 +564,18 @@ xfs_readlink( + + xfs_ilock(ip, XFS_ILOCK_SHARED); + +- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK); +- ASSERT(ip->i_d.di_size <= MAXPATHLEN); +- + pathlen = ip->i_d.di_size; + if (!pathlen) + goto out; + ++ if (pathlen > MAXPATHLEN) { ++ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long", ++ __func__, (unsigned long long)ip->i_ino, pathlen); ++ ASSERT(0); ++ error = XFS_ERROR(EFSCORRUPTED); ++ goto out; ++ } ++ + if (ip->i_df.if_flags & XFS_IFINLINE) { + memcpy(link, ip->i_df.if_u1.if_data, pathlen); + link[pathlen] = '\0'; +diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig +new file mode 100644 +index 0000000..5be91c0 +--- /dev/null ++++ b/grsecurity/Kconfig +@@ -0,0 +1,1078 @@ ++# ++# grsecurity configuration ++# ++ ++menu "Grsecurity" ++ ++config GRKERNSEC ++ bool "Grsecurity" ++ select CRYPTO ++ select CRYPTO_SHA256 ++ help ++ If you say Y here, you will be able to configure many features ++ that will enhance the security of your system. It is highly ++ recommended that you say Y here and read through the help ++ for each option so that you fully understand the features and ++ can evaluate their usefulness for your machine. ++ ++choice ++ prompt "Security Level" ++ depends on GRKERNSEC ++ default GRKERNSEC_CUSTOM ++ ++config GRKERNSEC_LOW ++ bool "Low" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_CHDIR ++ ++ help ++ If you choose this option, several of the grsecurity options will ++ be enabled that will give you greater protection against a number ++ of attacks, while assuring that none of your software will have any ++ conflicts with the additional security measures. If you run a lot ++ of unusual software, or you are having problems with the higher ++ security levels, you should say Y here. 
With this option, the ++ following features are enabled: ++ ++ - Linking restrictions ++ - FIFO restrictions ++ - Restricted dmesg ++ - Enforced chdir("/") on chroot ++ - Runtime module disabling ++ ++config GRKERNSEC_MEDIUM ++ bool "Medium" ++ select PAX ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_USERGROUP ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB)) ++ ++ help ++ If you say Y here, several features in addition to those included ++ in the low additional security level will be enabled. These ++ features provide even more security to your system, though in rare ++ cases they may be incompatible with very old or poorly written ++ software. If you enable this option, make sure that your auth ++ service (identd) is running as gid 1001. With this option, ++ the following features (in addition to those provided in the ++ low additional security level) will be enabled: ++ ++ - Failed fork logging ++ - Time change logging ++ - Signal logging ++ - Deny mounts in chroot ++ - Deny double chrooting ++ - Deny sysctl writes in chroot ++ - Deny mknod in chroot ++ - Deny access to abstract AF_UNIX sockets out of chroot ++ - Deny pivot_root in chroot ++ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port ++ - /proc restrictions with special GID set to 10 (usually wheel) ++ - Address Space Layout Randomization (ASLR) ++ - Prevent exploitation of most refcount overflows ++ - Bounds checking of copying between the kernel and userland ++ ++config GRKERNSEC_HIGH ++ bool "High" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SHMAT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_FCHDIR ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_CHROOT_CAPS ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_CHROOT_FINDTASK ++ select GRKERNSEC_SYSFS_RESTRICT ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_HIDESYM ++ select GRKERNSEC_BRUTE ++ select GRKERNSEC_PROC_USERGROUP ++ select GRKERNSEC_KMEM ++ select GRKERNSEC_RESLOG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_PROC_ADD ++ select GRKERNSEC_CHROOT_CHMOD ++ select GRKERNSEC_CHROOT_NICE ++ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS) ++ select GRKERNSEC_AUDIT_MOUNT ++ select GRKERNSEC_MODHARDEN if (MODULES) ++ select GRKERNSEC_HARDEN_PTRACE ++ select GRKERNSEC_PTRACE_READEXEC ++ select GRKERNSEC_VM86 if (X86_32) ++ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC) ++ select PAX ++ select PAX_RANDUSTACK ++ select 
PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_NOEXEC ++ select PAX_MPROTECT ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN) ++ select PAX_MEMORY_UDEREF if (X86 && !XEN) ++ select PAX_RANDKSTACK if (X86_TSC && X86) ++ select PAX_SEGMEXEC if (X86_32) ++ select PAX_PAGEEXEC ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC) ++ select PAX_EMUTRAMP if (PARISC) ++ select PAX_EMUSIGRT if (PARISC) ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) ++ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86)) ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB)) ++ help ++ If you say Y here, many of the features of grsecurity will be ++ enabled, which will protect you against many kinds of attacks ++ against your system. The heightened security comes at a cost ++ of an increased chance of incompatibilities with rare software ++ on your machine. Since this security level enables PaX, you should ++ view <http://pax.grsecurity.net> and read about the PaX ++ project. While you are there, download chpax and run it on ++ binaries that cause problems with PaX. Also remember that ++ since the /proc restrictions are enabled, you must run your ++ identd as gid 1001. This security level enables the following ++ features in addition to those listed in the low and medium ++ security levels: ++ ++ - Additional /proc restrictions ++ - Chmod restrictions in chroot ++ - No signals, ptrace, or viewing of processes outside of chroot ++ - Capability restrictions in chroot ++ - Deny fchdir out of chroot ++ - Priority restrictions in chroot ++ - Segmentation-based implementation of PaX ++ - Mprotect restrictions ++ - Removal of addresses from /proc/<pid>/[smaps|maps|stat] ++ - Kernel stack randomization ++ - Mount/unmount/remount logging ++ - Kernel symbol hiding ++ - Hardening of module auto-loading ++ - Ptrace restrictions ++ - Restricted vm86 mode ++ - Restricted sysfs/debugfs ++ - Active kernel exploit response ++ ++config GRKERNSEC_CUSTOM ++ bool "Custom" ++ help ++ If you say Y here, you will be able to configure every grsecurity ++ option, which allows you to enable many more features that aren't ++ covered in the basic security levels. These additional features ++ include TPE, socket restrictions, and the sysctl system for ++ grsecurity. It is advised that you read through the help for ++ each option to determine its usefulness in your situation. ++ ++endchoice ++ ++menu "Memory Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_KMEM ++ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port" ++ select STRICT_DEVMEM if (X86 || ARM || TILE || S390) ++ help ++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to ++ be written to or read from to modify or leak the contents of the running ++ kernel. /dev/port will also not be allowed to be opened. If you have module ++ support disabled, enabling this will close up four ways that are ++ currently used to insert malicious code into the running kernel. ++ Even with all these features enabled, we still highly recommend that ++ you use the RBAC system, as it is still possible for an attacker to ++ modify the running kernel through privileged I/O granted by ioperm/iopl. ++ If you are not using XFree86, you may be able to stop this additional ++ case by enabling the 'Disable privileged I/O' option. 
Though nothing ++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem, ++ but only to video memory, which is the only writing we allow in this ++ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will ++ not be allowed to be mprotected with PROT_WRITE later. ++ It is highly recommended that you say Y here if you meet all the ++ conditions above. ++ ++config GRKERNSEC_VM86 ++ bool "Restrict VM86 mode" ++ depends on X86_32 ++ ++ help ++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to ++ make use of a special execution mode on 32bit x86 processors called ++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain ++ video cards and will still work with this option enabled. The purpose ++ of the option is to prevent exploitation of emulation errors in ++ virtualization of vm86 mode like the one discovered in VMWare in 2009. ++ Nearly all users should be able to enable this option. ++ ++config GRKERNSEC_IO ++ bool "Disable privileged I/O" ++ depends on X86 ++ select RTC_CLASS ++ select RTC_INTF_DEV ++ select RTC_DRV_CMOS ++ ++ help ++ If you say Y here, all ioperm and iopl calls will return an error. ++ Ioperm and iopl can be used to modify the running kernel. ++ Unfortunately, some programs need this access to operate properly, ++ the most notable of which are XFree86 and hwclock. hwclock can be ++ remedied by having RTC support in the kernel, so real-time ++ clock support is enabled if this option is enabled, to ensure ++ that hwclock operates correctly. XFree86 still will not ++ operate correctly with this option enabled, so DO NOT CHOOSE Y ++ IF YOU USE XFree86. If you use XFree86 and you still want to ++ protect your kernel against modification, use the RBAC system. ++ ++config GRKERNSEC_PROC_MEMMAP ++ bool "Harden ASLR against information leaks and entropy reduction" ++ default y if (PAX_NOEXEC || PAX_ASLR) ++ depends on PAX_NOEXEC || PAX_ASLR ++ help ++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will ++ give no information about the addresses of its mappings if ++ PaX features that rely on random addresses are enabled on the task. ++ In addition to sanitizing this information and disabling other ++ dangerous sources of information, this option causes reads of sensitive ++ /proc/<pid> entries to be denied when the file descriptor was opened in a different ++ task than the one performing the read. Such attempts are logged. ++ This option also limits argv/env strings for suid/sgid binaries ++ to 512KB to prevent a complete exhaustion of the stack entropy provided ++ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid ++ binaries to prevent alternative mmap layouts from being abused. ++ ++ If you use PaX it is essential that you say Y here as it closes up ++ several holes that make full ASLR useless locally. ++ ++config GRKERNSEC_BRUTE ++ bool "Deter exploit bruteforcing" ++ help ++ If you say Y here, attempts to bruteforce exploits against forking ++ daemons such as apache or sshd, as well as against suid/sgid binaries ++ will be deterred. When a child of a forking daemon is killed by PaX ++ or crashes due to an illegal instruction or other suspicious signal, ++ the parent process will be delayed 30 seconds upon every subsequent ++ fork until the administrator is able to assess the situation and ++ restart the daemon. ++ In the suid/sgid case, the attempt is logged, the user has all their ++ processes terminated, and they are prevented from executing any further ++ processes for 15 minutes. 
++ It is recommended that you also enable signal logging in the auditing ++ section so that logs are generated when a process triggers a suspicious ++ signal. ++ If the sysctl option is enabled, a sysctl option with name ++ "deter_bruteforce" is created. ++ ++config GRKERNSEC_MODHARDEN ++ bool "Harden module auto-loading" ++ depends on MODULES ++ help ++ If you say Y here, module auto-loading in response to use of some ++ feature implemented by an unloaded module will be restricted to ++ root users. Enabling this option helps defend against attacks ++ by unprivileged users who abuse the auto-loading behavior to ++ cause a vulnerable module to load that is then exploited. ++ ++ If this option prevents a legitimate use of auto-loading for a ++ non-root user, the administrator can execute modprobe manually ++ with the exact name of the module mentioned in the alert log. ++ Alternatively, the administrator can add the module to the list ++ of modules loaded at boot by modifying init scripts. ++ ++ Modification of init scripts will most likely be needed on ++ Ubuntu servers with encrypted home directory support enabled, ++ as the first non-root user logging in will cause the ecb(aes), ++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded. ++ ++config GRKERNSEC_HIDESYM ++ bool "Hide kernel symbols" ++ help ++ If you say Y here, getting information on loaded modules, and ++ displaying all kernel symbols through a syscall will be restricted ++ to users with CAP_SYS_MODULE. For software compatibility reasons, ++ /proc/kallsyms will be restricted to the root user. The RBAC ++ system can hide that entry even from root. ++ ++ This option also prevents leaking of kernel addresses through ++ several /proc entries. ++ ++ Note that this option is only effective provided the following ++ conditions are met: ++ 1) The kernel using grsecurity is not precompiled by some distribution ++ 2) You have also enabled GRKERNSEC_DMESG ++ 3) You are using the RBAC system and hiding other files such as your ++ kernel image and System.map. Alternatively, enabling this option ++ causes the permissions on /boot, /lib/modules, and the kernel ++ source directory to change at compile time to prevent ++ reading by non-root users. ++ If the above conditions are met, this option will aid in providing a ++ useful protection against local kernel exploitation of overflows ++ and arbitrary read/write vulnerabilities. ++ ++config GRKERNSEC_KERN_LOCKOUT ++ bool "Active kernel exploit response" ++ depends on X86 || ARM || PPC || SPARC ++ help ++ If you say Y here, when a PaX alert is triggered due to suspicious ++ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY) ++ or an OOPs occurs due to bad memory accesses, instead of just ++ terminating the offending process (and potentially allowing ++ a subsequent exploit from the same user), we will take one of two ++ actions: ++ If the user was root, we will panic the system ++ If the user was non-root, we will log the attempt, terminate ++ all processes owned by the user, then prevent them from creating ++ any new processes until the system is restarted ++ This deters repeated kernel exploitation/bruteforcing attempts ++ and is useful for later forensics. ++ ++endmenu ++menu "Role Based Access Control Options" ++depends on GRKERNSEC ++ ++config GRKERNSEC_RBAC_DEBUG ++ bool ++ ++config GRKERNSEC_NO_RBAC ++ bool "Disable RBAC system" ++ help ++ If you say Y here, the /dev/grsec device will be removed from the kernel, ++ preventing the RBAC system from being enabled. 
You should only say Y ++ here if you have no intention of using the RBAC system, so as to prevent ++ an attacker with root access from misusing the RBAC system to hide files ++ and processes when loadable module support and /dev/[k]mem have been ++ locked down. ++ ++config GRKERNSEC_ACL_HIDEKERN ++ bool "Hide kernel processes" ++ help ++ If you say Y here, all kernel threads will be hidden from all ++ processes but those whose subject has the "view hidden processes" ++ flag. ++ ++config GRKERNSEC_ACL_MAXTRIES ++ int "Maximum tries before password lockout" ++ default 3 ++ help ++ This option enforces the maximum number of times a user can attempt ++ to authorize themselves with the grsecurity RBAC system before being ++ denied the ability to attempt authorization again for a specified time. ++ The lower the number, the harder it will be to brute-force a password. ++ ++config GRKERNSEC_ACL_TIMEOUT ++ int "Time to wait after max password tries, in seconds" ++ default 30 ++ help ++ This option specifies the time the user must wait after attempting to ++ authorize to the RBAC system with the maximum number of invalid ++ passwords. The higher the number, the harder it will be to brute-force ++ a password. ++ ++endmenu ++menu "Filesystem Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_PROC ++ bool "Proc restrictions" ++ help ++ If you say Y here, the permissions of the /proc filesystem ++ will be altered to enhance system security and privacy. You MUST ++ choose either a user only restriction or a user and group restriction. ++ Depending upon the option you choose, you can either restrict users to ++ see only the processes they themselves run, or choose a group that can ++ view all processes and files normally restricted to root if you choose ++ the "restrict to user only" option. NOTE: If you're running identd or ++ ntpd as a non-root user, you will have to run it as the group you ++ specify here. ++ ++config GRKERNSEC_PROC_USER ++ bool "Restrict /proc to user only" ++ depends on GRKERNSEC_PROC ++ help ++ If you say Y here, non-root users will only be able to view their own ++ processes, and will be restricted from viewing network-related information ++ and kernel symbol and module information. ++ ++config GRKERNSEC_PROC_USERGROUP ++ bool "Allow special group" ++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER ++ help ++ If you say Y here, you will be able to select a group that will be ++ able to view all processes and network-related information. If you've ++ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still ++ remain hidden. This option is useful if you want to run identd as ++ a non-root user. ++ ++config GRKERNSEC_PROC_GID ++ int "GID for special group" ++ depends on GRKERNSEC_PROC_USERGROUP ++ default 1001 ++ ++config GRKERNSEC_PROC_ADD ++ bool "Additional restrictions" ++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP ++ help ++ If you say Y here, additional restrictions will be placed on ++ /proc that keep normal users from viewing device information and ++ slabinfo information that could be useful for exploits. ++ ++config GRKERNSEC_LINK ++ bool "Linking restrictions" ++ help ++ If you say Y here, /tmp race exploits will be prevented, since users ++ will no longer be able to follow symlinks owned by other users in ++ world-writable +t directories (e.g. /tmp), unless the owner of the ++ symlink is the owner of the directory. Users will also not be ++ able to hardlink to files they do not own. 
If the sysctl option is ++ enabled, a sysctl option with name "linking_restrictions" is created. ++ ++config GRKERNSEC_FIFO ++ bool "FIFO restrictions" ++ help ++ If you say Y here, users will not be able to write to FIFOs they don't ++ own in world-writable +t directories (e.g. /tmp), unless the owner of ++ the FIFO is the same owner of the directory it's held in. If the sysctl ++ option is enabled, a sysctl option with name "fifo_restrictions" is ++ created. ++ ++config GRKERNSEC_SYSFS_RESTRICT ++ bool "Sysfs/debugfs restriction" ++ depends on SYSFS ++ help ++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and ++ any filesystem normally mounted under it (e.g. debugfs) will be ++ mostly accessible only by root. These filesystems generally provide access ++ to hardware and debug information that isn't appropriate for unprivileged ++ users of the system. Sysfs and debugfs have also become a large source ++ of new vulnerabilities, ranging from infoleaks to local compromise. ++ There has been very little oversight with an eye toward security involved ++ in adding new exporters of information to these filesystems, so their ++ use is discouraged. ++ For reasons of compatibility, a few directories have been whitelisted ++ for access by non-root users: ++ /sys/fs/selinux ++ /sys/fs/fuse ++ /sys/devices/system/cpu ++ ++config GRKERNSEC_ROFS ++ bool "Runtime read-only mount protection" ++ help ++ If you say Y here, a sysctl option with name "romount_protect" will ++ be created. By setting this option to 1 at runtime, filesystems ++ will be protected in the following ways: ++ * No new writable mounts will be allowed ++ * Existing read-only mounts won't be able to be remounted read/write ++ * Write operations will be denied on all block devices ++ This option acts independently of grsec_lock: once it is set to 1, ++ it cannot be turned off. Therefore, please be mindful of the resulting ++ behavior if this option is enabled in an init script on a read-only ++ filesystem. This feature is mainly intended for secure embedded systems. ++ ++config GRKERNSEC_CHROOT ++ bool "Chroot jail restrictions" ++ help ++ If you say Y here, you will be able to choose several options that will ++ make breaking out of a chrooted jail much more difficult. If you ++ encounter no software incompatibilities with the following options, it ++ is recommended that you enable each one. ++ ++config GRKERNSEC_CHROOT_MOUNT ++ bool "Deny mounts" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ mount or remount filesystems. If the sysctl option is enabled, a ++ sysctl option with name "chroot_deny_mount" is created. ++ ++config GRKERNSEC_CHROOT_DOUBLE ++ bool "Deny double-chroots" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chroot ++ again outside the chroot. This is a widely used method of breaking ++ out of a chroot jail and should not be allowed. If the sysctl ++ option is enabled, a sysctl option with name ++ "chroot_deny_chroot" is created. ++ ++config GRKERNSEC_CHROOT_PIVOT ++ bool "Deny pivot_root in chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to use ++ a function called pivot_root() that was introduced in Linux 2.3.41. It ++ works similar to chroot in that it changes the root filesystem. This ++ function could be misused in a chrooted process to attempt to break out ++ of the chroot, and therefore should not be allowed. 
If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_pivot" is ++ created. ++ ++config GRKERNSEC_CHROOT_CHDIR ++ bool "Enforce chdir(\"/\") on all chroots" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, the current working directory of all newly-chrooted ++ applications will be set to the root directory of the chroot. ++ The man page on chroot(2) states: ++ Note that this call does not change the current working ++ directory, so that `.' can be outside the tree rooted at ++ `/'. In particular, the super-user can escape from a ++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'. ++ ++ It is recommended that you say Y here, since it's not known to break ++ any software. If the sysctl option is enabled, a sysctl option with ++ name "chroot_enforce_chdir" is created. ++ ++config GRKERNSEC_CHROOT_CHMOD ++ bool "Deny (f)chmod +s" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chmod ++ or fchmod files to make them have suid or sgid bits. This protects ++ against another published method of breaking a chroot. If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_chmod" is ++ created. ++ ++config GRKERNSEC_CHROOT_FCHDIR ++ bool "Deny fchdir out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, a well-known method of breaking chroots by fchdir'ing ++ to a file descriptor of the chrooting process that points to a directory ++ outside the filesystem will be stopped. If the sysctl option ++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created. ++ ++config GRKERNSEC_CHROOT_MKNOD ++ bool "Deny mknod" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be allowed to ++ mknod. The problem with using mknod inside a chroot is that it ++ would allow an attacker to create a device entry that is the same ++ as one on the physical root of your system, which could be ++ anything from the console device to a device for your hard drive (which ++ they could then use to wipe the drive or steal data). It is recommended ++ that you say Y here, unless you run into software incompatibilities. ++ If the sysctl option is enabled, a sysctl option with name ++ "chroot_deny_mknod" is created. ++ ++config GRKERNSEC_CHROOT_SHMAT ++ bool "Deny shmat() out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to attach ++ to shared memory segments that were created outside of the chroot jail. ++ It is recommended that you say Y here. If the sysctl option is enabled, ++ a sysctl option with name "chroot_deny_shmat" is created. ++ ++config GRKERNSEC_CHROOT_UNIX ++ bool "Deny access to abstract AF_UNIX sockets out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ connect to abstract (meaning not belonging to a filesystem) Unix ++ domain sockets that were bound outside of a chroot. It is recommended ++ that you say Y here. If the sysctl option is enabled, a sysctl option ++ with name "chroot_deny_unix" is created. ++ ++config GRKERNSEC_CHROOT_FINDTASK ++ bool "Protect outside processes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, ++ getsid, or view any process outside of the chroot. 
If the sysctl ++ option is enabled, a sysctl option with name "chroot_findtask" is ++ created. ++ ++config GRKERNSEC_CHROOT_NICE ++ bool "Restrict priority changes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to raise ++ the priority of processes in the chroot, or alter the priority of ++ processes outside the chroot. This provides more security than simply ++ removing CAP_SYS_NICE from the process' capability set. If the ++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice" ++ is created. ++ ++config GRKERNSEC_CHROOT_SYSCTL ++ bool "Deny sysctl writes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, an attacker in a chroot will not be able to ++ write to sysctl entries, either by sysctl(2) or through a /proc ++ interface. It is strongly recommended that you say Y here. If the ++ sysctl option is enabled, a sysctl option with name ++ "chroot_deny_sysctl" is created. ++ ++config GRKERNSEC_CHROOT_CAPS ++ bool "Capability restrictions" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, the capabilities on all processes within a ++ chroot jail will be lowered to stop module insertion, raw i/o, ++ system and net admin tasks, rebooting the system, modifying immutable ++ files, modifying IPC owned by another, and changing the system time. ++ This is left as an option because it can break some apps. Disable this ++ if your chrooted apps are having problems performing those kinds of ++ tasks. If the sysctl option is enabled, a sysctl option with ++ name "chroot_caps" is created. ++ ++endmenu ++menu "Kernel Auditing" ++depends on GRKERNSEC ++ ++config GRKERNSEC_AUDIT_GROUP ++ bool "Single group for auditing" ++ help ++ If you say Y here, the exec, chdir, and (un)mount logging features ++ will only operate on a group you specify. This option is recommended ++ if you only want to watch certain users instead of having a large ++ amount of logs from the entire system. If the sysctl option is enabled, ++ a sysctl option with name "audit_group" is created. ++ ++config GRKERNSEC_AUDIT_GID ++ int "GID for auditing" ++ depends on GRKERNSEC_AUDIT_GROUP ++ default 1007 ++ ++config GRKERNSEC_EXECLOG ++ bool "Exec logging" ++ help ++ If you say Y here, all execve() calls will be logged (since the ++ other exec*() calls are frontends to execve(), all execution ++ will be logged). Useful for shell-servers that like to keep track ++ of their users. If the sysctl option is enabled, a sysctl option with ++ name "exec_logging" is created. ++ WARNING: This option when enabled will produce a LOT of logs, especially ++ on an active system. ++ ++config GRKERNSEC_RESLOG ++ bool "Resource logging" ++ help ++ If you say Y here, all attempts to overstep resource limits will ++ be logged with the resource name, the requested size, and the current ++ limit. It is highly recommended that you say Y here. If the sysctl ++ option is enabled, a sysctl option with name "resource_logging" is ++ created. If the RBAC system is enabled, the sysctl value is ignored. ++ ++config GRKERNSEC_CHROOT_EXECLOG ++ bool "Log execs within chroot" ++ help ++ If you say Y here, all executions inside a chroot jail will be logged ++ to syslog. This can cause a large amount of logs if certain ++ applications (e.g. djb's daemontools) are installed on the system, and ++ is therefore left as an option. If the sysctl option is enabled, a ++ sysctl option with name "chroot_execlog" is created. 
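The help texts above repeatedly note that "a sysctl option with name ... is created". As a minimal userspace sketch (not part of the patch itself), assuming the conventional grsecurity sysctl tree under /proc/sys/kernel/grsecurity/, such toggles can be flipped at runtime and then sealed with grsec_lock. The toggle names below are copied from the help texts, so verify them against your build; the program must run as root:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Write "1" to one grsecurity sysctl toggle; returns 0 on success.
 * Assumes the conventional /proc/sys/kernel/grsecurity/ tree. */
static int grsec_enable(const char *name)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
		return -1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Toggle names taken from the option help texts above. */
	grsec_enable("chroot_deny_sysctl");
	grsec_enable("chroot_execlog");
	/* Setting grsec_lock last makes the configuration immutable
	 * until reboot, as described for the sysctl system. */
	return grsec_enable("grsec_lock");
}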
++config GRKERNSEC_AUDIT_PTRACE ++ bool "Ptrace logging" ++ help ++ If you say Y here, all attempts to attach to a process via ptrace ++ will be logged. If the sysctl option is enabled, a sysctl option ++ with name "audit_ptrace" is created. ++
++config GRKERNSEC_AUDIT_CHDIR ++ bool "Chdir logging" ++ help ++ If you say Y here, all chdir() calls will be logged. If the sysctl ++ option is enabled, a sysctl option with name "audit_chdir" is created. ++
++config GRKERNSEC_AUDIT_MOUNT ++ bool "(Un)Mount logging" ++ help ++ If you say Y here, all mounts and unmounts will be logged. If the ++ sysctl option is enabled, a sysctl option with name "audit_mount" is ++ created. ++
++config GRKERNSEC_SIGNAL ++ bool "Signal logging" ++ help ++ If you say Y here, certain important signals will be logged, such as ++ SIGSEGV, which will inform you when an error occurred in a program, ++ which in some cases could indicate an exploit attempt. ++ If the sysctl option is enabled, a sysctl option with name ++ "signal_logging" is created. ++
++config GRKERNSEC_FORKFAIL ++ bool "Fork failure logging" ++ help ++ If you say Y here, all failed fork() attempts will be logged. ++ This could suggest a fork bomb, or someone attempting to overstep ++ their process limit. If the sysctl option is enabled, a sysctl option ++ with name "forkfail_logging" is created. ++
++config GRKERNSEC_TIME ++ bool "Time change logging" ++ help ++ If you say Y here, any changes of the system clock will be logged. ++ If the sysctl option is enabled, a sysctl option with name ++ "timechange_logging" is created. ++
++config GRKERNSEC_PROC_IPADDR ++ bool "/proc/<pid>/ipaddr support" ++ help ++ If you say Y here, a new entry will be added to each /proc/<pid> ++ directory that contains the IP address of the user associated with the task. ++ The IP is carried across local TCP and AF_UNIX stream sockets. ++ This information can be useful for IDS/IPSes to perform remote response ++ to a local attack. The entry is readable only by the owner of the ++ process (and by root if it has CAP_DAC_OVERRIDE, which can be removed via ++ the RBAC system), and thus does not create privacy concerns. ++
++config GRKERNSEC_RWXMAP_LOG ++ bool 'Denied RWX mmap/mprotect logging' ++ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT ++ help ++ If you say Y here, calls to mmap() and mprotect() with explicit ++ usage of PROT_WRITE and PROT_EXEC together will be logged when ++ denied by the PAX_MPROTECT feature. If the sysctl option is ++ enabled, a sysctl option with name "rwxmap_logging" is created. ++
++config GRKERNSEC_AUDIT_TEXTREL ++ bool 'ELF text relocations logging (READ HELP)' ++ depends on PAX_MPROTECT ++ help ++ If you say Y here, text relocations will be logged with the filename ++ of the offending library or binary. The purpose of the feature is ++ to help Linux distribution developers get rid of libraries and ++ binaries that need text relocations, which hinder the future progress ++ of PaX. Only Linux distribution developers should say Y here, and ++ never on a production machine, as this option creates an information ++ leak that could aid an attacker in defeating the randomization of ++ a single memory region. If the sysctl option is enabled, a sysctl ++ option with name "audit_textrel" is created.
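For the distribution developers at whom GRKERNSEC_AUDIT_TEXTREL is aimed, offending objects can also be found from userspace before deploying such a kernel, since text relocations are recorded in a DSO's dynamic section. The library path below is a placeholder:

    readelf -d /usr/lib/libexample.so | grep TEXTREL   # a TEXTREL tag means the DSO needs text relocations

A library that prints nothing here requires no text relocations and will never trigger this log.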
++endmenu ++
++menu "Executable Protections" ++depends on GRKERNSEC ++
++config GRKERNSEC_DMESG ++ bool "Dmesg(8) restriction" ++ help ++ If you say Y here, non-root users will not be able to use dmesg(8) ++ to view up to the last 4KB of messages in the kernel's log buffer. ++ The kernel's log buffer often contains kernel addresses and other ++ identifying information useful to an attacker in fingerprinting a ++ system for a targeted exploit. ++ If the sysctl option is enabled, a sysctl option with name "dmesg" is ++ created. ++
++config GRKERNSEC_HARDEN_PTRACE ++ bool "Deter ptrace-based process snooping" ++ help ++ If you say Y here, TTY sniffers and other malicious monitoring ++ programs implemented through ptrace will be defeated. If you ++ have been using the RBAC system, this option has already been ++ enabled for several years for all users, with the ability to make ++ fine-grained exceptions. ++ ++ This option only affects the ability of non-root users to ptrace ++ processes that are not a descendant of the ptracing process. ++ This means that strace ./binary and gdb ./binary will still work, ++ but attaching to arbitrary processes will not. If the sysctl ++ option is enabled, a sysctl option with name "harden_ptrace" is ++ created. ++
++config GRKERNSEC_PTRACE_READEXEC ++ bool "Require read access to ptrace sensitive binaries" ++ help ++ If you say Y here, unprivileged users will not be able to ptrace unreadable ++ binaries. This option is useful in environments that ++ remove the read bits (e.g. file mode 4711) from suid binaries to ++ prevent infoleaking of their contents. This option adds ++ consistency to the use of that file mode, as the binary could otherwise ++ be read out by running it under ptrace without privileges. ++ ++ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec" ++ is created. ++
++config GRKERNSEC_SETXID ++ bool "Enforce consistent multithreaded privileges" ++ depends on (X86 || SPARC64 || PPC || ARM || MIPS) ++ help ++ If you say Y here, a change from a root uid to a non-root uid ++ in a multithreaded application will cause the resulting uids, ++ gids, supplementary groups, and capabilities in that thread ++ to be propagated to the other threads of the process. In most ++ cases this is unnecessary, as glibc will emulate this behavior ++ on behalf of the application. Other libcs do not act in the ++ same way, allowing the other threads of the process to continue ++ running with root privileges. If the sysctl option is enabled, ++ a sysctl option with name "consistent_setxid" is created. ++
++config GRKERNSEC_TPE ++ bool "Trusted Path Execution (TPE)" ++ help ++ If you say Y here, you will be able to choose a gid to add to the ++ supplementary groups of users you want to mark as "untrusted." ++ These users will not be able to execute any files that are not in ++ root-owned directories writable only by root. If the sysctl option ++ is enabled, a sysctl option with name "tpe" is created. ++
++config GRKERNSEC_TPE_ALL ++ bool "Partially restrict all non-root users" ++ depends on GRKERNSEC_TPE ++ help ++ If you say Y here, all non-root users will be covered under ++ a weaker TPE restriction. This is separate from, and in addition to, ++ the main TPE options that you have selected elsewhere. Thus, if a ++ "trusted" GID is chosen, this restriction applies even to that GID. ++ Under this restriction, all non-root users will only be allowed to ++ execute files in directories they own that are not group or ++ world-writable, or in directories owned by root and writable only by ++ root. If the sysctl option is enabled, a sysctl option with name ++ "tpe_restrict_all" is created.
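A minimal sketch of marking a user as untrusted under TPE, assuming the default GID of 1005 and GRKERNSEC_TPE_INVERT left off; the group name is arbitrary:

    groupadd -g 1005 tpe_untrusted
    usermod -aG tpe_untrusted someuser   # someuser may now execute only from root-owned dirs writable only by root

With the invert option described next, membership in the same group would instead exempt a user from TPE.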
++config GRKERNSEC_TPE_INVERT ++ bool "Invert GID option" ++ depends on GRKERNSEC_TPE ++ help ++ If you say Y here, the group you specify in the TPE configuration will ++ decide what group TPE restrictions will be *disabled* for. This ++ option is useful if you want TPE restrictions to be applied to most ++ users on the system. If the sysctl option is enabled, a sysctl option ++ with name "tpe_invert" is created. Unlike other sysctl options, this ++ entry will default to on for backward compatibility. ++
++config GRKERNSEC_TPE_GID ++ int "GID for untrusted users" ++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines what group TPE restrictions will be ++ *enabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. ++
++config GRKERNSEC_TPE_GID ++ int "GID for trusted users" ++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines what group TPE restrictions will be ++ *disabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. ++
++endmenu ++menu "Network Protections" ++depends on GRKERNSEC ++
++config GRKERNSEC_RANDNET ++ bool "Larger entropy pools" ++ help ++ If you say Y here, the entropy pools used for many features of Linux ++ and grsecurity will be doubled in size. Since several grsecurity ++ features use additional randomness, it is recommended that you say Y ++ here. Saying Y here has a similar effect to modifying ++ /proc/sys/kernel/random/poolsize. ++
++config GRKERNSEC_BLACKHOLE ++ bool "TCP/UDP blackhole and LAST_ACK DoS prevention" ++ depends on NET ++ help ++ If you say Y here, neither TCP resets nor ICMP ++ destination-unreachable packets will be sent in response to packets ++ sent to ports for which no associated listening process exists. ++ This feature supports both IPv4 and IPv6 and exempts the ++ loopback interface from blackholing. Enabling this feature ++ makes a host more resilient to DoS attacks and reduces network ++ visibility against scanners. ++ ++ The blackhole feature as-implemented is equivalent to the FreeBSD ++ blackhole feature, as it prevents RST responses to all packets, not ++ just SYNs. Under most application behavior this causes no ++ problems, but applications (like haproxy) may not close certain ++ connections in a way that cleanly terminates them on the remote ++ end, leaving the remote host in LAST_ACK state. Because of this ++ side-effect and to prevent intentional LAST_ACK DoSes, this ++ feature also adds automatic mitigation against such attacks. ++ The mitigation drastically reduces the amount of time a socket ++ can spend in LAST_ACK state. If you're using haproxy and not ++ all servers it connects to have this option enabled, consider ++ disabling this feature on the haproxy host. ++ ++ If the sysctl option is enabled, two sysctl options with names ++ "ip_blackhole" and "lastack_retries" will be created. ++ While "ip_blackhole" takes the standard zero/non-zero on/off ++ toggle, "lastack_retries" uses the same kinds of values as ++ "tcp_retries1" and "tcp_retries2". The default value of 4 ++ prevents a socket from lasting more than 45 seconds in LAST_ACK ++ state.
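Both blackhole knobs named above can be set at runtime when sysctl support (see the "Sysctl support" menu below) is enabled; the values shown reflect the documented defaults:

    echo 1 > /proc/sys/kernel/grsecurity/ip_blackhole      # suppress RSTs/ICMP unreachables for closed ports
    echo 4 > /proc/sys/kernel/grsecurity/lastack_retries   # cap the time a socket may sit in LAST_ACK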
++config GRKERNSEC_SOCKET ++ bool "Socket restrictions" ++ depends on NET ++ help ++ If you say Y here, you will be able to choose from several options. ++ If you assign a GID on your system and add it to the supplementary ++ groups of users you want to restrict socket access for, this patch ++ will perform up to three things, based on the option(s) you choose. ++
++config GRKERNSEC_SOCKET_ALL ++ bool "Deny any sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID whose users will ++ be unable to connect to other hosts from your machine or run server ++ applications from your machine. If the sysctl option is enabled, a ++ sysctl option with name "socket_all" is created. ++
++config GRKERNSEC_SOCKET_ALL_GID ++ int "GID to deny all sockets for" ++ depends on GRKERNSEC_SOCKET_ALL ++ default 1004 ++ help ++ Here you can choose the GID to disable socket access for. Remember to ++ add the users you want socket access disabled for to the GID ++ specified here. If the sysctl option is enabled, a sysctl option ++ with name "socket_all_gid" is created. ++
++config GRKERNSEC_SOCKET_CLIENT ++ bool "Deny client sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID whose users will ++ be unable to connect to other hosts from your machine, but will be ++ able to run servers. If this option is enabled, all users in the group ++ you specify will have to use passive mode when initiating FTP transfers ++ from the shell on your machine. If the sysctl option is enabled, a ++ sysctl option with name "socket_client" is created. ++
++config GRKERNSEC_SOCKET_CLIENT_GID ++ int "GID to deny client sockets for" ++ depends on GRKERNSEC_SOCKET_CLIENT ++ default 1003 ++ help ++ Here you can choose the GID to disable client socket access for. ++ Remember to add the users you want client socket access disabled for to ++ the GID specified here. If the sysctl option is enabled, a sysctl ++ option with name "socket_client_gid" is created. ++
++config GRKERNSEC_SOCKET_SERVER ++ bool "Deny server sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID whose users will ++ be unable to run server applications from your machine. If the sysctl ++ option is enabled, a sysctl option with name "socket_server" is created. ++
++config GRKERNSEC_SOCKET_SERVER_GID ++ int "GID to deny server sockets for" ++ depends on GRKERNSEC_SOCKET_SERVER ++ default 1002 ++ help ++ Here you can choose the GID to disable server socket access for. ++ Remember to add the users you want server socket access disabled for to ++ the GID specified here. If the sysctl option is enabled, a sysctl ++ option with name "socket_server_gid" is created. ++
++endmenu ++menu "Sysctl support" ++depends on GRKERNSEC && SYSCTL ++
++config GRKERNSEC_SYSCTL ++ bool "Sysctl support" ++ help ++ If you say Y here, you will be able to change the options that ++ grsecurity runs with at bootup, without having to recompile your ++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity ++ to enable (1) or disable (0) various features. All the sysctl entries ++ are mutable until the "grsec_lock" entry is set to a non-zero value. ++ All features enabled in the kernel configuration are disabled at boot ++ if you do not say Y to the "Turn on features by default" option. ++ All options should be set at startup, and the grsec_lock entry should ++ be set to a non-zero value after all the options are set. ++ *THIS IS EXTREMELY IMPORTANT*
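A sketch of the startup sequence this help text prescribes, typically run from an init script; exec_logging is just one example of a feature toggle from the auditing menu above:

    echo 1 > /proc/sys/kernel/grsecurity/exec_logging   # enable each desired feature first...
    echo 1 > /proc/sys/kernel/grsecurity/grsec_lock     # ...then lock: every grsecurity sysctl becomes immutable

Once grsec_lock is set non-zero, none of the entries can be changed again on the running kernel.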
++config GRKERNSEC_SYSCTL_DISTRO ++ bool "Extra sysctl support for distro makers (READ HELP)" ++ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO ++ help ++ If you say Y here, additional sysctl options will be created ++ for features that affect processes running as root. Therefore, ++ it is critical when using this option that the grsec_lock entry be ++ enabled after boot. Only distros that ship prebuilt kernel packages ++ with this option enabled and that can ensure grsec_lock is set ++ after boot should use this option. ++ *Failure to set grsec_lock after boot makes all grsec features ++ this option covers useless* ++ ++ Currently this option creates the following sysctl entries: ++ "Disable Privileged I/O": "disable_priv_io" ++
++config GRKERNSEC_SYSCTL_ON ++ bool "Turn on features by default" ++ depends on GRKERNSEC_SYSCTL ++ help ++ If you say Y here, the features enabled in the kernel configuration ++ will be turned on at boot time instead of starting out disabled. ++ It is recommended you say Y here unless ++ there is some reason you would want all sysctl-tunable features to ++ be disabled by default. As mentioned elsewhere, it is important ++ to enable the grsec_lock entry once you have finished modifying ++ the sysctl entries. ++
++endmenu ++menu "Logging Options" ++depends on GRKERNSEC ++
++config GRKERNSEC_FLOODTIME ++ int "Seconds in between log messages (minimum)" ++ default 10 ++ help ++ This option allows you to enforce a minimum number of seconds between ++ grsecurity log messages. The default should be suitable for most ++ people; however, if you choose to change it, pick a value small enough ++ to allow informative logs to be produced, but large enough to ++ prevent flooding. ++
++config GRKERNSEC_FLOODBURST ++ int "Number of messages in a burst (maximum)" ++ default 6 ++ help ++ This option allows you to choose the maximum number of messages allowed ++ within the flood time interval you chose in a separate option. With ++ the defaults of 10 and 6, for example, at most 6 messages will be ++ logged within any 10-second window. The ++ default should be suitable for most people; however, if you find that ++ many of your logs are being interpreted as flooding, you may want to ++ raise this value.
++ ++endmenu ++ ++endmenu +diff --git a/grsecurity/Makefile b/grsecurity/Makefile +new file mode 100644 +index 0000000..1b9afa9 +--- /dev/null ++++ b/grsecurity/Makefile +@@ -0,0 +1,38 @@ ++# grsecurity's ACL system was originally written in 2001 by Michael Dalton ++# during 2001-2009 it has been completely redesigned by Brad Spengler ++# into an RBAC system ++# ++# All code in this directory and various hooks inserted throughout the kernel ++# are copyright Brad Spengler - Open Source Security, Inc., and released ++# under the GPL v2 or higher ++ ++KBUILD_CFLAGS += -Werror ++ ++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ ++ grsec_mount.o grsec_sig.o grsec_sysctl.o \ ++ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o ++ ++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \ ++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ ++ gracl_learn.o grsec_log.o ++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o ++ ++ifdef CONFIG_NET ++obj-y += grsec_sock.o ++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o ++endif ++ ++ifndef CONFIG_GRKERNSEC ++obj-y += grsec_disabled.o ++endif ++ ++ifdef CONFIG_GRKERNSEC_HIDESYM ++extra-y := grsec_hidesym.o ++$(obj)/grsec_hidesym.o: ++ @-chmod -f 500 /boot ++ @-chmod -f 500 /lib/modules ++ @-chmod -f 500 /lib64/modules ++ @-chmod -f 500 /lib32/modules ++ @-chmod -f 700 . ++ @echo ' grsec: protected kernel image paths' ++endif +diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c +new file mode 100644 +index 0000000..c475143 +--- /dev/null ++++ b/grsecurity/gracl.c +@@ -0,0 +1,4171 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/namei.h> ++#include <linux/mount.h> ++#include <linux/tty.h> ++#include <linux/proc_fs.h> ++#include <linux/smp_lock.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/types.h> ++#include <linux/sysctl.h> ++#include <linux/netdevice.h> ++#include <linux/ptrace.h> ++#include <linux/gracl.h> ++#include <linux/gralloc.h> ++#include <linux/security.h> ++#include <linux/grinternal.h> ++#include <linux/pid_namespace.h> ++#include <linux/fdtable.h> ++#include <linux/percpu.h> ++ ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++ ++static struct acl_role_db acl_role_set; ++static struct name_db name_set; ++static struct inodev_db inodev_set; ++ ++/* for keeping track of userspace pointers used for subjects, so we ++ can share references in the kernel as well ++*/ ++ ++static struct dentry *real_root; ++static struct vfsmount *real_root_mnt; ++ ++static struct acl_subj_map_db subj_map_set; ++ ++static struct acl_role_label *default_role; ++ ++static struct acl_role_label *role_list; ++ ++static u16 acl_sp_role_value; ++ ++extern char *gr_shared_page[4]; ++static DEFINE_MUTEX(gr_dev_mutex); ++DEFINE_RWLOCK(gr_inode_lock); ++ ++struct gr_arg *gr_usermode; ++ ++static unsigned int gr_status __read_only = GR_STATUS_INIT; ++ ++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); ++extern void gr_clear_learn_entries(void); ++ ++#ifdef CONFIG_GRKERNSEC_RESLOG ++extern void gr_log_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt); ++#endif ++ ++unsigned char *gr_system_salt; ++unsigned char *gr_system_sum; ++ ++static struct sprole_pw **acl_special_roles = NULL; ++static __u16 num_sprole_pws = 0; ++ ++static struct acl_role_label 
*kernel_role = NULL; ++ ++static unsigned int gr_auth_attempts = 0; ++static unsigned long gr_auth_expires = 0UL; ++ ++#ifdef CONFIG_NET ++extern struct vfsmount *sock_mnt; ++#endif ++extern struct vfsmount *pipe_mnt; ++extern struct vfsmount *shm_mnt; ++#ifdef CONFIG_HUGETLBFS ++extern struct vfsmount *hugetlbfs_vfsmount; ++#endif ++ ++static struct acl_object_label *fakefs_obj_rw; ++static struct acl_object_label *fakefs_obj_rwx; ++ ++extern int gr_init_uidset(void); ++extern void gr_free_uidset(void); ++extern void gr_remove_uid(uid_t uid); ++extern int gr_find_uid(uid_t uid); ++ ++__inline__ int ++gr_acl_is_enabled(void) ++{ ++ return (gr_status & GR_READY); ++} ++ ++#ifdef CONFIG_BTRFS_FS ++extern dev_t get_btrfs_dev_from_inode(struct inode *inode); ++extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); ++#endif ++ ++static inline dev_t __get_dev(const struct dentry *dentry) ++{ ++#ifdef CONFIG_BTRFS_FS ++ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr) ++ return get_btrfs_dev_from_inode(dentry->d_inode); ++ else ++#endif ++ return dentry->d_inode->i_sb->s_dev; ++} ++ ++dev_t gr_get_dev_from_dentry(struct dentry *dentry) ++{ ++ return __get_dev(dentry); ++} ++ ++static char gr_task_roletype_to_char(struct task_struct *task) ++{ ++ switch (task->role->roletype & ++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP | ++ GR_ROLE_SPECIAL)) { ++ case GR_ROLE_DEFAULT: ++ return 'D'; ++ case GR_ROLE_USER: ++ return 'U'; ++ case GR_ROLE_GROUP: ++ return 'G'; ++ case GR_ROLE_SPECIAL: ++ return 'S'; ++ } ++ ++ return 'X'; ++} ++ ++char gr_roletype_to_char(void) ++{ ++ return gr_task_roletype_to_char(current); ++} ++ ++__inline__ int ++gr_acl_tpe_check(void) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ if (current->role->roletype & GR_ROLE_TPE) ++ return 1; ++ else ++ return 0; ++} ++ ++int ++gr_handle_rawio(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (inode && S_ISBLK(inode->i_mode) && ++ grsec_enable_chroot_caps && proc_is_chrooted(current) && ++ !capable(CAP_SYS_RAWIO)) ++ return 1; ++#endif ++ return 0; ++} ++ ++static int ++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb) ++{ ++ if (likely(lena != lenb)) ++ return 0; ++ ++ return !memcmp(a, b, lena); ++} ++ ++static int prepend(char **buffer, int *buflen, const char *str, int namelen) ++{ ++ *buflen -= namelen; ++ if (*buflen < 0) ++ return -ENAMETOOLONG; ++ *buffer -= namelen; ++ memcpy(*buffer, str, namelen); ++ return 0; ++} ++ ++/* this must be called with vfsmount_lock and dcache_lock held */ ++ ++static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt, ++ struct dentry *root, struct vfsmount *rootmnt, ++ char *buffer, int buflen) ++{ ++ char * end = buffer+buflen; ++ char * retval; ++ int namelen; ++ ++ *--end = '\0'; ++ buflen--; ++ ++ if (buflen < 1) ++ goto Elong; ++ /* Get '/' right */ ++ retval = end-1; ++ *retval = '/'; ++ ++ for (;;) { ++ struct dentry * parent; ++ ++ if (dentry == root && vfsmnt == rootmnt) ++ break; ++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { ++ /* Global root? 
*/ ++ if (vfsmnt->mnt_parent == vfsmnt) ++ goto global_root; ++ dentry = vfsmnt->mnt_mountpoint; ++ vfsmnt = vfsmnt->mnt_parent; ++ continue; ++ } ++ parent = dentry->d_parent; ++ prefetch(parent); ++ namelen = dentry->d_name.len; ++ buflen -= namelen + 1; ++ if (buflen < 0) ++ goto Elong; ++ end -= namelen; ++ memcpy(end, dentry->d_name.name, namelen); ++ *--end = '/'; ++ retval = end; ++ dentry = parent; ++ } ++ ++out: ++ return retval; ++ ++global_root: ++ namelen = dentry->d_name.len; ++ buflen -= namelen; ++ if (buflen < 0) ++ goto Elong; ++ retval -= namelen-1; /* hit the slash */ ++ memcpy(retval, dentry->d_name.name, namelen); ++ goto out; ++Elong: ++ retval = ERR_PTR(-ENAMETOOLONG); ++ goto out; ++} ++ ++static char * ++gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt, ++ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen) ++{ ++ char *retval; ++ ++ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen); ++ if (unlikely(IS_ERR(retval))) ++ retval = strcpy(buf, "<path too long>"); ++ else if (unlikely(retval[1] == '/' && retval[2] == '\0')) ++ retval[1] = '\0'; ++ ++ return retval; ++} ++ ++static char * ++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ char *res; ++ ++ /* we can use real_root, real_root_mnt, because this is only called ++ by the RBAC system */ ++ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen); ++ ++ return res; ++} ++ ++static char * ++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ char *res; ++ struct dentry *root; ++ struct vfsmount *rootmnt; ++ struct task_struct *reaper = &init_task; ++ ++ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */ ++ read_lock(&reaper->fs->lock); ++ root = dget(reaper->fs->root.dentry); ++ rootmnt = mntget(reaper->fs->root.mnt); ++ read_unlock(&reaper->fs->lock); ++ ++ spin_lock(&dcache_lock); ++ spin_lock(&vfsmount_lock); ++ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen); ++ spin_unlock(&vfsmount_lock); ++ spin_unlock(&dcache_lock); ++ ++ dput(root); ++ mntput(rootmnt); ++ return res; ++} ++ ++static char * ++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ char *ret; ++ spin_lock(&dcache_lock); ++ spin_lock(&vfsmount_lock); ++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++ spin_unlock(&vfsmount_lock); ++ spin_unlock(&dcache_lock); ++ return ret; ++} ++ ++static char * ++gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ char *ret; ++ char *buf; ++ int buflen; ++ ++ spin_lock(&dcache_lock); ++ spin_lock(&vfsmount_lock); ++ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); ++ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6); ++ buflen = (int)(ret - buf); ++ if (buflen >= 5) ++ prepend(&ret, &buflen, "/proc", 5); ++ else ++ ret = strcpy(buf, "<path too long>"); ++ spin_unlock(&vfsmount_lock); ++ spin_unlock(&dcache_lock); ++ return ret; ++} ++ ++char * ++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, 
per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++__inline__ __u32 ++to_gr_audit(const __u32 reqmode) ++{ ++ /* masks off auditable permission flags, then shifts them to create ++ auditing flags, and adds the special case of append auditing if ++ we're requesting write */ ++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0)); ++} ++ ++struct acl_subject_label * ++lookup_subject_map(const struct acl_subject_label *userp) ++{ ++ unsigned int index = shash(userp, subj_map_set.s_size); ++ struct subject_map *match; ++ ++ match = subj_map_set.s_hash[index]; ++ ++ while (match && match->user != userp) ++ match = match->next; ++ ++ if (match != NULL) ++ return match->kernel; ++ else ++ return NULL; ++} ++ ++static void ++insert_subj_map_entry(struct subject_map *subjmap) ++{ ++ unsigned int index = shash(subjmap->user, subj_map_set.s_size); ++ struct subject_map **curr; ++ ++ subjmap->prev = NULL; ++ ++ curr = &subj_map_set.s_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = subjmap; ++ ++ subjmap->next = *curr; ++ *curr = subjmap; ++ ++ return; ++} ++ ++static struct acl_role_label * ++lookup_acl_role_label(const struct task_struct *task, const uid_t uid, ++ const gid_t gid) ++{ ++ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size); ++ struct acl_role_label *match; ++ struct role_allowed_ip *ipp; ++ unsigned int x; ++ u32 curr_ip = task->signal->curr_ip; ++ ++ task->signal->saved_ip = curr_ip; ++ ++ match = acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == uid) ++ goto found; ++ } ++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) ++ break; ++ match = match->next; ++ } ++found: ++ if (match == NULL) { ++ try_group: ++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size); ++ match = acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == gid) ++ goto found2; ++ } ++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) ++ break; ++ match = match->next; ++ } ++found2: ++ if (match == NULL) ++ match = default_role; ++ if (match->allowed_ips == NULL) ++ return match; ++ else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ match = default_role; ++ } ++ } else if (match->allowed_ips == NULL) { ++ return match; ++ } else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ goto try_group; ++ } ++ ++ 
return match; ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = fhash(ino, dev, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = fhash(ino, dev, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & GR_DELETED)) ++ return match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct name_entry * ++lookup_name_entry(const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ struct name_entry *match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len))) ++ match = match->next; ++ ++ return match; ++} ++ ++static struct name_entry * ++lookup_name_entry_create(const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ struct name_entry *match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ !match->deleted)) ++ match = match->next; ++ ++ if (match && match->deleted) ++ return match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ match->deleted)) ++ match = match->next; ++ ++ if (match && !match->deleted) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct inodev_entry * ++lookup_inodev_entry(const ino_t ino, const dev_t dev) ++{ ++ unsigned int index = fhash(ino, dev, inodev_set.i_size); ++ 
struct inodev_entry *match; ++ ++ match = inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != ino || match->nentry->device != dev)) ++ match = match->next; ++ ++ return match; ++} ++ ++static void ++insert_inodev_entry(struct inodev_entry *entry) ++{ ++ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device, ++ inodev_set.i_size); ++ struct inodev_entry **curr; ++ ++ entry->prev = NULL; ++ ++ curr = &inodev_set.i_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = entry; ++ ++ entry->next = *curr; ++ *curr = entry; ++ ++ return; ++} ++ ++static void ++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) ++{ ++ unsigned int index = ++ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size); ++ struct acl_role_label **curr; ++ struct acl_role_label *tmp, *tmp2; ++ ++ curr = &acl_role_set.r_hash[index]; ++ ++ /* simple case, slot is empty, just set it to our role */ ++ if (*curr == NULL) { ++ *curr = role; ++ } else { ++ /* example: ++ 1 -> 2 -> 3 (adding 2 -> 3 to here) ++ 2 -> 3 ++ */ ++ /* first check to see if we can already be reached via this slot */ ++ tmp = *curr; ++ while (tmp && tmp != role) ++ tmp = tmp->next; ++ if (tmp == role) { ++ /* we don't need to add ourselves to this slot's chain */ ++ return; ++ } ++ /* we need to add ourselves to this chain, two cases */ ++ if (role->next == NULL) { ++ /* simple case, append the current chain to our role */ ++ role->next = *curr; ++ *curr = role; ++ } else { ++ /* 1 -> 2 -> 3 -> 4 ++ 2 -> 3 -> 4 ++ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here) ++ */ ++ /* trickier case: walk our role's chain until we find ++ the role for the start of the current slot's chain */ ++ tmp = role; ++ tmp2 = *curr; ++ while (tmp->next && tmp->next != tmp2) ++ tmp = tmp->next; ++ if (tmp->next == tmp2) { ++ /* from example above, we found 3, so just ++ replace this slot's chain with ours */ ++ *curr = role; ++ } else { ++ /* we didn't find a subset of our role's chain ++ in the current slot's chain, so append their ++ chain to ours, and set us as the first role in ++ the slot's chain ++ ++ we could fold this case with the case above, ++ but making it explicit for clarity ++ */ ++ tmp->next = tmp2; ++ *curr = role; ++ } ++ } ++ } ++ ++ return; ++} ++ ++static void ++insert_acl_role_label(struct acl_role_label *role) ++{ ++ int i; ++ ++ if (role_list == NULL) { ++ role_list = role; ++ role->prev = NULL; ++ } else { ++ role->prev = role_list; ++ role_list = role; ++ } ++ ++ /* used for hash chains */ ++ role->next = NULL; ++ ++ if (role->roletype & GR_ROLE_DOMAIN) { ++ for (i = 0; i < role->domain_child_num; i++) ++ __insert_acl_role_label(role, role->domain_children[i]); ++ } else ++ __insert_acl_role_label(role, role->uidgid); ++} ++ ++static int ++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted) ++{ ++ struct name_entry **curr, *nentry; ++ struct inodev_entry *ientry; ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ ++ curr = &name_set.n_hash[index]; ++ ++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len))) ++ curr = &((*curr)->next); ++ ++ if (*curr != NULL) ++ return 1; ++ ++ nentry = acl_alloc(sizeof (struct name_entry)); ++ if (nentry == NULL) ++ return 0; ++ ientry = acl_alloc(sizeof (struct inodev_entry)); ++ if (ientry == NULL) ++ return 0; ++ ientry->nentry = nentry; ++ ++ nentry->key = key; ++ nentry->name = name; ++ 
nentry->inode = inode; ++ nentry->device = device; ++ nentry->len = len; ++ nentry->deleted = deleted; ++ ++ nentry->prev = NULL; ++ curr = &name_set.n_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = nentry; ++ nentry->next = *curr; ++ *curr = nentry; ++ ++ /* insert us into the table searchable by inode/dev */ ++ insert_inodev_entry(ientry); ++ ++ return 1; ++} ++ ++static void ++insert_acl_obj_label(struct acl_object_label *obj, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = ++ fhash(obj->inode, obj->device, subj->obj_hash_size); ++ struct acl_object_label **curr; ++ ++ ++ obj->prev = NULL; ++ ++ curr = &subj->obj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++static void ++insert_acl_subj_label(struct acl_subject_label *obj, ++ struct acl_role_label *role) ++{ ++ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size); ++ struct acl_subject_label **curr; ++ ++ obj->prev = NULL; ++ ++ curr = &role->subj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */ ++ ++static void * ++create_table(__u32 * len, int elementsize) ++{ ++ unsigned int table_sizes[] = { ++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, ++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, ++ 4194301, 8388593, 16777213, 33554393, 67108859 ++ }; ++ void *newtable = NULL; ++ unsigned int pwr = 0; ++ ++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && ++ table_sizes[pwr] <= *len) ++ pwr++; ++ ++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize)) ++ return newtable; ++ ++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) ++ newtable = ++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); ++ else ++ newtable = vmalloc(table_sizes[pwr] * elementsize); ++ ++ *len = table_sizes[pwr]; ++ ++ return newtable; ++} ++ ++static int ++init_variables(const struct gr_arg *arg) ++{ ++ struct task_struct *reaper = &init_task; ++ unsigned int stacksize; ++ ++ subj_map_set.s_size = arg->role_db.num_subjects; ++ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; ++ name_set.n_size = arg->role_db.num_objects; ++ inodev_set.i_size = arg->role_db.num_objects; ++ ++ if (!subj_map_set.s_size || !acl_role_set.r_size || ++ !name_set.n_size || !inodev_set.i_size) ++ return 1; ++ ++ if (!gr_init_uidset()) ++ return 1; ++ ++ /* set up the stack that holds allocation info */ ++ ++ stacksize = arg->role_db.num_pointers + 5; ++ ++ if (!acl_alloc_stack_init(stacksize)) ++ return 1; ++ ++ /* grab reference for the real root dentry and vfsmount */ ++ read_lock(&reaper->fs->lock); ++ real_root = dget(reaper->fs->root.dentry); ++ real_root_mnt = mntget(reaper->fs->root.mnt); ++ read_unlock(&reaper->fs->lock); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino); ++#endif ++ ++ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label)); ++ if (fakefs_obj_rw == NULL) ++ return 1; ++ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE; ++ ++ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label)); ++ if (fakefs_obj_rwx == NULL) ++ return 1; ++ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC; ++ ++ subj_map_set.s_hash = ++ (struct subject_map **) create_table(&subj_map_set.s_size, 
sizeof(void *)); ++ acl_role_set.r_hash = ++ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *)); ++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *)); ++ inodev_set.i_hash = ++ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *)); ++ ++ if (!subj_map_set.s_hash || !acl_role_set.r_hash || ++ !name_set.n_hash || !inodev_set.i_hash) ++ return 1; ++ ++ memset(subj_map_set.s_hash, 0, ++ sizeof(struct subject_map *) * subj_map_set.s_size); ++ memset(acl_role_set.r_hash, 0, ++ sizeof (struct acl_role_label *) * acl_role_set.r_size); ++ memset(name_set.n_hash, 0, ++ sizeof (struct name_entry *) * name_set.n_size); ++ memset(inodev_set.i_hash, 0, ++ sizeof (struct inodev_entry *) * inodev_set.i_size); ++ ++ return 0; ++} ++ ++/* free information not needed after startup ++ currently contains user->kernel pointer mappings for subjects ++*/ ++ ++static void ++free_init_variables(void) ++{ ++ __u32 i; ++ ++ if (subj_map_set.s_hash) { ++ for (i = 0; i < subj_map_set.s_size; i++) { ++ if (subj_map_set.s_hash[i]) { ++ kfree(subj_map_set.s_hash[i]); ++ subj_map_set.s_hash[i] = NULL; ++ } ++ } ++ ++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <= ++ PAGE_SIZE) ++ kfree(subj_map_set.s_hash); ++ else ++ vfree(subj_map_set.s_hash); ++ } ++ ++ return; ++} ++ ++static void ++free_variables(void) ++{ ++ struct acl_subject_label *s; ++ struct acl_role_label *r; ++ struct task_struct *task, *task2; ++ unsigned int x; ++ ++ gr_clear_learn_entries(); ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(task2, task) { ++ task->acl_sp_role = 0; ++ task->acl_role_id = 0; ++ task->acl = NULL; ++ task->role = NULL; ++ } while_each_thread(task2, task); ++ read_unlock(&tasklist_lock); ++ ++ /* release the reference to the real root dentry and vfsmount */ ++ if (real_root) ++ dput(real_root); ++ real_root = NULL; ++ if (real_root_mnt) ++ mntput(real_root_mnt); ++ real_root_mnt = NULL; ++ ++ /* free all object hash tables */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (r->subj_hash == NULL) ++ goto next_role; ++ FOR_EACH_SUBJECT_START(r, s, x) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_SUBJECT_END(s, x) ++ FOR_EACH_NESTED_SUBJECT_START(r, s) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_NESTED_SUBJECT_END(s) ++ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE) ++ kfree(r->subj_hash); ++ else ++ vfree(r->subj_hash); ++ r->subj_hash = NULL; ++next_role: ++ FOR_EACH_ROLE_END(r) ++ ++ acl_free_all(); ++ ++ if (acl_role_set.r_hash) { ++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <= ++ PAGE_SIZE) ++ kfree(acl_role_set.r_hash); ++ else ++ vfree(acl_role_set.r_hash); ++ } ++ if (name_set.n_hash) { ++ if ((name_set.n_size * sizeof (struct name_entry *)) <= ++ PAGE_SIZE) ++ kfree(name_set.n_hash); ++ else ++ vfree(name_set.n_hash); ++ } ++ ++ if (inodev_set.i_hash) { ++ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <= ++ PAGE_SIZE) ++ kfree(inodev_set.i_hash); ++ else ++ vfree(inodev_set.i_hash); ++ } ++ ++ gr_free_uidset(); ++ ++ memset(&name_set, 0, sizeof (struct name_db)); ++ memset(&inodev_set, 0, sizeof (struct inodev_db)); ++ memset(&acl_role_set, 0, sizeof (struct acl_role_db)); ++ memset(&subj_map_set, 0, sizeof 
(struct acl_subj_map_db)); ++ ++ default_role = NULL; ++ kernel_role = NULL; ++ role_list = NULL; ++ ++ return; ++} ++ ++static __u32 ++count_user_objs(struct acl_object_label *userp) ++{ ++ struct acl_object_label o_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_from_user(&o_tmp, userp, ++ sizeof (struct acl_object_label))) ++ break; ++ ++ userp = o_tmp.prev; ++ num++; ++ } ++ ++ return num; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role); ++ ++static int ++copy_user_glob(struct acl_object_label *obj) ++{ ++ struct acl_object_label *g_tmp, **guser; ++ unsigned int len; ++ char *tmp; ++ ++ if (obj->globbed == NULL) ++ return 0; ++ ++ guser = &obj->globbed; ++ while (*guser) { ++ g_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label)); ++ if (g_tmp == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(g_tmp, *guser, ++ sizeof (struct acl_object_label))) ++ return -EFAULT; ++ ++ len = strnlen_user(g_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, g_tmp->filename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ g_tmp->filename = tmp; ++ ++ *guser = g_tmp; ++ guser = &(g_tmp->next); ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, ++ struct acl_role_label *role) ++{ ++ struct acl_object_label *o_tmp; ++ unsigned int len; ++ int ret; ++ char *tmp; ++ ++ while (userp) { ++ if ((o_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(o_tmp, userp, ++ sizeof (struct acl_object_label))) ++ return -EFAULT; ++ ++ userp = o_tmp->prev; ++ ++ len = strnlen_user(o_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, o_tmp->filename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ o_tmp->filename = tmp; ++ ++ insert_acl_obj_label(o_tmp, subj); ++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode, ++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0)) ++ return -ENOMEM; ++ ++ ret = copy_user_glob(o_tmp); ++ if (ret) ++ return ret; ++ ++ if (o_tmp->nested) { ++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role); ++ if (IS_ERR(o_tmp->nested)) ++ return PTR_ERR(o_tmp->nested); ++ ++ /* insert into nested subject list */ ++ o_tmp->nested->next = role->hash->first; ++ role->hash->first = o_tmp->nested; ++ } ++ } ++ ++ return 0; ++} ++ ++static __u32 ++count_user_subjs(struct acl_subject_label *userp) ++{ ++ struct acl_subject_label s_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_from_user(&s_tmp, userp, ++ sizeof (struct acl_subject_label))) ++ break; ++ ++ userp = s_tmp.prev; ++ /* do not count nested subjects against this count, since ++ they are not included in the hash table, but are ++ attached to objects. 
We have already counted ++ the subjects in userspace for the allocation ++ stack ++ */ ++ if (!(s_tmp.mode & GR_NESTED)) ++ num++; ++ } ++ ++ return num; ++} ++ ++static int ++copy_user_allowedips(struct acl_role_label *rolep) ++{ ++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; ++ ++ ruserip = rolep->allowed_ips; ++ ++ while (ruserip) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_allowed_ip *) ++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(rtmp, ruserip, ++ sizeof (struct role_allowed_ip))) ++ return -EFAULT; ++ ++ ruserip = rtmp->prev; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->allowed_ips = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!ruserip) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_transitions(struct acl_role_label *rolep) ++{ ++ struct role_transition *rusertp, *rtmp = NULL, *rlast; ++ ++ unsigned int len; ++ char *tmp; ++ ++ rusertp = rolep->transitions; ++ ++ while (rusertp) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_transition *) ++ acl_alloc(sizeof (struct role_transition))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(rtmp, rusertp, ++ sizeof (struct role_transition))) ++ return -EFAULT; ++ ++ rusertp = rtmp->prev; ++ ++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= GR_SPROLE_LEN) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, rtmp->rolename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ rtmp->rolename = tmp; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->transitions = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!rusertp) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role) ++{ ++ struct acl_subject_label *s_tmp = NULL, *s_tmp2; ++ unsigned int len; ++ char *tmp; ++ __u32 num_objs; ++ struct acl_ip_label **i_tmp, *i_utmp2; ++ struct gr_hash_struct ghash; ++ struct subject_map *subjmap; ++ unsigned int i_num; ++ int err; ++ ++ s_tmp = lookup_subject_map(userp); ++ ++ /* we've already copied this subject into the kernel, just return ++ the reference to it, and don't copy it over again ++ */ ++ if (s_tmp) ++ return(s_tmp); ++ ++ if ((s_tmp = (struct acl_subject_label *) ++ acl_alloc(sizeof (struct acl_subject_label))) == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); ++ if (subjmap == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap->user = userp; ++ subjmap->kernel = s_tmp; ++ insert_subj_map_entry(subjmap); ++ ++ if (copy_from_user(s_tmp, userp, ++ sizeof (struct acl_subject_label))) ++ return ERR_PTR(-EFAULT); ++ ++ len = strnlen_user(s_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return ERR_PTR(-EINVAL); ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_from_user(tmp, s_tmp->filename, len)) ++ return ERR_PTR(-EFAULT); ++ tmp[len-1] = '\0'; ++ s_tmp->filename = tmp; ++ ++ if (!strcmp(s_tmp->filename, "/")) ++ role->root_label = s_tmp; ++ ++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct))) ++ return ERR_PTR(-EFAULT); ++ ++ /* copy user and group transition tables */ ++ ++ if (s_tmp->user_trans_num) { ++ uid_t *uidlist; ++ ++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t)); ++ if (uidlist 
== NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->user_transitions = uidlist; ++ } ++ ++ if (s_tmp->group_trans_num) { ++ gid_t *gidlist; ++ ++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t)); ++ if (gidlist == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->group_transitions = gidlist; ++ } ++ ++ /* set up object hash table */ ++ num_objs = count_user_objs(ghash.first); ++ ++ s_tmp->obj_hash_size = num_objs; ++ s_tmp->obj_hash = ++ (struct acl_object_label **) ++ create_table(&(s_tmp->obj_hash_size), sizeof(void *)); ++ ++ if (!s_tmp->obj_hash) ++ return ERR_PTR(-ENOMEM); ++ ++ memset(s_tmp->obj_hash, 0, ++ s_tmp->obj_hash_size * ++ sizeof (struct acl_object_label *)); ++ ++ /* add in objects */ ++ err = copy_user_objs(ghash.first, s_tmp, role); ++ ++ if (err) ++ return ERR_PTR(err); ++ ++ /* set pointer for parent subject */ ++ if (s_tmp->parent_subject) { ++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role); ++ ++ if (IS_ERR(s_tmp2)) ++ return s_tmp2; ++ ++ s_tmp->parent_subject = s_tmp2; ++ } ++ ++ /* add in ip acls */ ++ ++ if (!s_tmp->ip_num) { ++ s_tmp->ips = NULL; ++ goto insert; ++ } ++ ++ i_tmp = ++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num, ++ sizeof (struct acl_ip_label *)); ++ ++ if (!i_tmp) ++ return ERR_PTR(-ENOMEM); ++ ++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { ++ *(i_tmp + i_num) = ++ (struct acl_ip_label *) ++ acl_alloc(sizeof (struct acl_ip_label)); ++ if (!*(i_tmp + i_num)) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_from_user ++ (&i_utmp2, s_tmp->ips + i_num, ++ sizeof (struct acl_ip_label *))) ++ return ERR_PTR(-EFAULT); ++ ++ if (copy_from_user ++ (*(i_tmp + i_num), i_utmp2, ++ sizeof (struct acl_ip_label))) ++ return ERR_PTR(-EFAULT); ++ ++ if ((*(i_tmp + i_num))->iface == NULL) ++ continue; ++ ++ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ); ++ if (!len || len >= IFNAMSIZ) ++ return ERR_PTR(-EINVAL); ++ tmp = acl_alloc(len); ++ if (tmp == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len)) ++ return ERR_PTR(-EFAULT); ++ (*(i_tmp + i_num))->iface = tmp; ++ } ++ ++ s_tmp->ips = i_tmp; ++ ++insert: ++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode, ++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 
1 : 0)) ++ return ERR_PTR(-ENOMEM); ++ ++ return s_tmp; ++} ++ ++static int ++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role) ++{ ++ struct acl_subject_label s_pre; ++ struct acl_subject_label * ret; ++ int err; ++ ++ while (userp) { ++ if (copy_from_user(&s_pre, userp, ++ sizeof (struct acl_subject_label))) ++ return -EFAULT; ++ ++ /* do not add nested subjects here, add ++ while parsing objects ++ */ ++ ++ if (s_pre.mode & GR_NESTED) { ++ userp = s_pre.prev; ++ continue; ++ } ++ ++ ret = do_copy_user_subj(userp, role); ++ ++ err = PTR_ERR(ret); ++ if (IS_ERR(ret)) ++ return err; ++ ++ insert_acl_subj_label(ret, role); ++ ++ userp = s_pre.prev; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_acl(struct gr_arg *arg) ++{ ++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2; ++ struct sprole_pw *sptmp; ++ struct gr_hash_struct *ghash; ++ uid_t *domainlist; ++ unsigned int r_num; ++ unsigned int len; ++ char *tmp; ++ int err = 0; ++ __u16 i; ++ __u32 num_subjs; ++ ++ /* we need a default and kernel role */ ++ if (arg->role_db.num_roles < 2) ++ return -EINVAL; ++ ++ /* copy special role authentication info from userspace */ ++ ++ num_sprole_pws = arg->num_sprole_pws; ++ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *)); ++ ++ if (!acl_special_roles && num_sprole_pws) ++ return -ENOMEM; ++ ++ for (i = 0; i < num_sprole_pws; i++) { ++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw)); ++ if (!sptmp) ++ return -ENOMEM; ++ if (copy_from_user(sptmp, arg->sprole_pws + i, ++ sizeof (struct sprole_pw))) ++ return -EFAULT; ++ ++ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= GR_SPROLE_LEN) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, sptmp->rolename, len)) ++ return -EFAULT; ++ ++ tmp[len-1] = '\0'; ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Copying special role %s\n", tmp); ++#endif ++ sptmp->rolename = tmp; ++ acl_special_roles[i] = sptmp; ++ } ++ ++ r_utmp = (struct acl_role_label **) arg->role_db.r_table; ++ ++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) { ++ r_tmp = acl_alloc(sizeof (struct acl_role_label)); ++ ++ if (!r_tmp) ++ return -ENOMEM; ++ ++ if (copy_from_user(&r_utmp2, r_utmp + r_num, ++ sizeof (struct acl_role_label *))) ++ return -EFAULT; ++ ++ if (copy_from_user(r_tmp, r_utmp2, ++ sizeof (struct acl_role_label))) ++ return -EFAULT; ++ ++ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, r_tmp->rolename, len)) ++ return -EFAULT; ++ ++ tmp[len-1] = '\0'; ++ r_tmp->rolename = tmp; ++ ++ if (!strcmp(r_tmp->rolename, "default") ++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) { ++ default_role = r_tmp; ++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) { ++ kernel_role = r_tmp; ++ } ++ ++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) ++ return -EFAULT; ++ ++ r_tmp->hash = ghash; ++ ++ num_subjs = count_user_subjs(r_tmp->hash->first); ++ ++ r_tmp->subj_hash_size = num_subjs; ++ r_tmp->subj_hash = ++ (struct acl_subject_label **) ++ create_table(&(r_tmp->subj_hash_size), sizeof(void *)); ++ ++ if (!r_tmp->subj_hash) ++ return -ENOMEM; ++ ++ err = 
copy_user_allowedips(r_tmp); ++ if (err) ++ return err; ++ ++ /* copy domain info */ ++ if (r_tmp->domain_children != NULL) { ++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t)); ++ if (domainlist == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) ++ return -EFAULT; ++ ++ r_tmp->domain_children = domainlist; ++ } ++ ++ err = copy_user_transitions(r_tmp); ++ if (err) ++ return err; ++ ++ memset(r_tmp->subj_hash, 0, ++ r_tmp->subj_hash_size * ++ sizeof (struct acl_subject_label *)); ++ ++ err = copy_user_subjs(r_tmp->hash->first, r_tmp); ++ ++ if (err) ++ return err; ++ ++ /* set nested subject list to null */ ++ r_tmp->hash->first = NULL; ++ ++ insert_acl_role_label(r_tmp); ++ } ++ ++ if (default_role == NULL || kernel_role == NULL) ++ return -EINVAL; ++ ++ return err; ++} ++ ++static int ++gracl_init(struct gr_arg *args) ++{ ++ int error = 0; ++ ++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN); ++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN); ++ ++ if (init_variables(args)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); ++ error = -ENOMEM; ++ free_variables(); ++ goto out; ++ } ++ ++ error = copy_user_acl(args); ++ free_init_variables(); ++ if (error) { ++ free_variables(); ++ goto out; ++ } ++ ++ if ((error = gr_set_acls(0))) { ++ free_variables(); ++ goto out; ++ } ++ ++ pax_open_kernel(); ++ gr_status |= GR_READY; ++ pax_close_kernel(); ++ ++ out: ++ return error; ++} ++ ++/* derived from glibc fnmatch() 0: match, 1: no match*/ ++ ++static int ++glob_match(const char *p, const char *n) ++{ ++ char c; ++ ++ while ((c = *p++) != '\0') { ++ switch (c) { ++ case '?': ++ if (*n == '\0') ++ return 1; ++ else if (*n == '/') ++ return 1; ++ break; ++ case '\\': ++ if (*n != c) ++ return 1; ++ break; ++ case '*': ++ for (c = *p++; c == '?' || c == '*'; c = *p++) { ++ if (*n == '/') ++ return 1; ++ else if (c == '?') { ++ if (*n == '\0') ++ return 1; ++ else ++ ++n; ++ } ++ } ++ if (c == '\0') { ++ return 0; ++ } else { ++ const char *endp; ++ ++ if ((endp = strchr(n, '/')) == NULL) ++ endp = n + strlen(n); ++ ++ if (c == '[') { ++ for (--p; n < endp; ++n) ++ if (!glob_match(p, n)) ++ return 0; ++ } else if (c == '/') { ++ while (*n != '\0' && *n != '/') ++ ++n; ++ if (*n == '/' && !glob_match(p, n + 1)) ++ return 0; ++ } else { ++ for (--p; n < endp; ++n) ++ if (*n == c && !glob_match(p, n)) ++ return 0; ++ } ++ ++ return 1; ++ } ++ case '[': ++ { ++ int not; ++ char cold; ++ ++ if (*n == '\0' || *n == '/') ++ return 1; ++ ++ not = (*p == '!' 
|| *p == '^'); ++ if (not) ++ ++p; ++ ++ c = *p++; ++ for (;;) { ++ unsigned char fn = (unsigned char)*n; ++ ++ if (c == '\0') ++ return 1; ++ else { ++ if (c == fn) ++ goto matched; ++ cold = c; ++ c = *p++; ++ ++ if (c == '-' && *p != ']') { ++ unsigned char cend = *p++; ++ ++ if (cend == '\0') ++ return 1; ++ ++ if (cold <= fn && fn <= cend) ++ goto matched; ++ ++ c = *p++; ++ } ++ } ++ ++ if (c == ']') ++ break; ++ } ++ if (!not) ++ return 1; ++ break; ++ matched: ++ while (c != ']') { ++ if (c == '\0') ++ return 1; ++ ++ c = *p++; ++ } ++ if (not) ++ return 1; ++ } ++ break; ++ default: ++ if (c != *n) ++ return 1; ++ } ++ ++ ++n; ++ } ++ ++ if (*n == '\0') ++ return 0; ++ ++ if (*n == '/') ++ return 0; ++ ++ return 1; ++} ++ ++static struct acl_object_label * ++chk_glob_label(struct acl_object_label *globbed, ++ const struct dentry *dentry, const struct vfsmount *mnt, char **path) ++{ ++ struct acl_object_label *tmp; ++ ++ if (*path == NULL) ++ *path = gr_to_filename_nolock(dentry, mnt); ++ ++ tmp = globbed; ++ ++ while (tmp) { ++ if (!glob_match(tmp->filename, *path)) ++ return tmp; ++ tmp = tmp->next; ++ } ++ ++ return NULL; ++} ++ ++static struct acl_object_label * ++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ const ino_t curr_ino, const dev_t curr_dev, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ struct acl_subject_label *tmpsubj; ++ struct acl_object_label *retval; ++ struct acl_object_label *retval2; ++ ++ tmpsubj = (struct acl_subject_label *) subj; ++ read_lock(&gr_inode_lock); ++ do { ++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj); ++ if (retval) { ++ if (checkglob && retval->globbed) { ++ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path); ++ if (retval2) ++ retval = retval2; ++ } ++ break; ++ } ++ } while ((tmpsubj = tmpsubj->parent_subject)); ++ read_unlock(&gr_inode_lock); ++ ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ const struct dentry *curr_dentry, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ int newglob = checkglob; ++ ++ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking ++ as we don't want a / * rule to match instead of the / object ++ don't do this for create lookups that call this function though, since they're looking up ++ on the parent and thus need globbing checks on all paths ++ */ ++ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB) ++ newglob = GR_NO_GLOB; ++ ++ return __full_lookup(orig_dentry, orig_mnt, ++ curr_dentry->d_inode->i_ino, ++ __get_dev(curr_dentry), subj, path, newglob); ++} ++ ++static struct acl_object_label * ++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path, const int checkglob) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct acl_object_label *retval; ++ ++ spin_lock(&dcache_lock); ++ spin_lock(&vfsmount_lock); ++ ++ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt || ++#ifdef CONFIG_NET ++ mnt == sock_mnt || ++#endif ++#ifdef CONFIG_HUGETLBFS ++ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) || ++#endif ++ /* ignore Eric Biederman */ ++ IS_PRIVATE(l_dentry->d_inode))) { ++ retval = (subj->mode & GR_SHMEXEC) ? 
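++ /*
++  * [Editorial note, not part of the original patch.] Objects on
++  * kernel-internal mounts cannot be named by an on-disk pathname in the
++  * policy, so the test above short-circuits them to a synthetic object:
++  * unlinked inodes on shm_mnt or hugetlbfs, anything on pipe_mnt or
++  * sock_mnt, and S_PRIVATE inodes all receive fakefs_obj_rwx when the
++  * subject carries GR_SHMEXEC (allowing executable shared memory) and
++  * fakefs_obj_rw otherwise. Both fake objects are initialized outside
++  * this hunk.
++  */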
fakefs_obj_rwx : fakefs_obj_rw; ++ goto out; ++ } ++ ++ for (;;) { ++ if (dentry == real_root && mnt == real_root_mnt) ++ break; ++ ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = dentry->d_parent; ++ } ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ ++ if (retval == NULL) ++ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob); ++out: ++ spin_unlock(&vfsmount_lock); ++ spin_unlock(&dcache_lock); ++ ++ BUG_ON(retval == NULL); ++ ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path) ++{ ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB); ++} ++ ++static struct acl_subject_label * ++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_role_label *role) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct acl_subject_label *retval; ++ ++ spin_lock(&dcache_lock); ++ spin_lock(&vfsmount_lock); ++ ++ for (;;) { ++ if (dentry == real_root && mnt == real_root_mnt) ++ break; ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ ++ read_lock(&gr_inode_lock); ++ retval = ++ lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = dentry->d_parent; ++ } ++ ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ ++ if (unlikely(retval == NULL)) { ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(real_root->d_inode->i_ino, ++ __get_dev(real_root), role); ++ read_unlock(&gr_inode_lock); ++ } ++out: ++ spin_unlock(&vfsmount_lock); ++ spin_unlock(&dcache_lock); ++ ++ BUG_ON(retval == NULL); ++ ++ return retval; ++} ++ ++static void ++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? 
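++ /*
++  * [Editorial note, not part of the original patch.] The learn record
++  * assembled here carries the role name and type, the credentials, the
++  * path of the running binary (falling back to the subject filename when
++  * there is no exec_file), the target path, the access mode, and the
++  * saved source IP. Userspace learning tooling (gradm's learning mode,
++  * an assumption here, as that tool is not part of this hunk) converts
++  * these records into policy lines.
++  */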
gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_sysctl(const char *path, const __u32 mode) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_id_change(const char type, const unsigned int real, ++ const unsigned int effective, const unsigned int fs) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ type, real, effective, fs, &task->signal->saved_ip); ++ ++ return; ++} ++ ++__u32 ++gr_search_file(const struct dentry * dentry, const __u32 mode, ++ const struct vfsmount * mnt) ++{ ++ __u32 retval = mode; ++ struct acl_subject_label *curracl; ++ struct acl_object_label *currobj; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ curracl = current->acl; ++ ++ currobj = chk_obj_label(dentry, mnt, curracl); ++ retval = currobj->mode & mode; ++ ++ /* if we're opening a specified transfer file for writing ++ (e.g. /dev/initctl), then transfer our role to init ++ */ ++ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE && ++ current->role->roletype & GR_ROLE_PERSIST)) { ++ struct task_struct *task = init_pid_ns.child_reaper; ++ ++ if (task->role != current->role) { ++ task->acl_sp_role = 0; ++ task->acl_role_id = current->acl_role_id; ++ task->role = current->role; ++ rcu_read_lock(); ++ read_lock(&grsec_exec_file_lock); ++ gr_apply_subject_to_task(task); ++ read_unlock(&grsec_exec_file_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG); ++ } ++ } ++ ++ if (unlikely ++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) ++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ retval = new_mode; ++ ++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) ++ new_mode |= GR_INHERIT; ++ ++ if (!(mode & GR_NOLEARN)) ++ gr_log_learn(dentry, mnt, new_mode); ++ } ++ ++ return retval; ++} ++ ++struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry, ++ const struct dentry *parent, ++ const struct vfsmount *mnt) ++{ ++ struct name_entry *match; ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *curracl; ++ char *path; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return NULL; ++ ++ preempt_disable(); ++ path = gr_to_filename_rbac(new_dentry, mnt); ++ match = lookup_name_entry_create(path); ++ ++ curracl = current->acl; ++ ++ if (match) { ++ read_lock(&gr_inode_lock); ++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); ++ read_unlock(&gr_inode_lock); ++ ++ if (matchpo) { ++ preempt_enable(); ++ return matchpo; ++ } ++ } ++ ++ // lookup parent ++ ++ matchpo = 
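++ /*
++  * [Editorial note, not part of the original patch.] Creation checks
++  * resolve in two steps: first an exact match on the not yet existing
++  * pathname (lookup_name_entry_create() above), then a fallback to the
++  * parent directory with GR_CREATE_GLOB, so globbed rules attached to
++  * the parent still apply to names being created beneath it.
++  */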
chk_obj_create_label(parent, mnt, curracl, path); ++ ++ preempt_enable(); ++ return matchpo; ++} ++ ++__u32 ++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, ++ const struct vfsmount * mnt, const __u32 mode) ++{ ++ struct acl_object_label *matchpo; ++ __u32 retval; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ matchpo = gr_get_create_object(new_dentry, parent, mnt); ++ ++ retval = matchpo->mode & mode; ++ ++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) ++ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ gr_log_learn(new_dentry, mnt, new_mode); ++ return new_mode; ++ } ++ ++ return retval; ++} ++ ++__u32 ++gr_check_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, const struct vfsmount * old_mnt) ++{ ++ struct acl_object_label *obj; ++ __u32 oldmode, newmode; ++ __u32 needmode; ++ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ | ++ GR_DELETE | GR_INHERIT; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (GR_CREATE | GR_LINK); ++ ++ obj = chk_obj_label(old_dentry, old_mnt, current->acl); ++ oldmode = obj->mode; ++ ++ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt); ++ newmode = obj->mode; ++ ++ needmode = newmode & checkmodes; ++ ++ // old name for hardlink must have at least the permissions of the new name ++ if ((oldmode & needmode) != needmode) ++ goto bad; ++ ++ // if old name had restrictions/auditing, make sure the new name does as well ++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS); ++ ++ // don't allow hardlinking of suid/sgid files without permission ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) ++ needmode |= GR_SETID; ++ ++ if ((newmode & needmode) != needmode) ++ goto bad; ++ ++ // enforce minimum permissions ++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) ++ return newmode; ++bad: ++ needmode = oldmode; ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) ++ needmode |= GR_SETID; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK); ++ return (GR_CREATE | GR_LINK); ++ } else if (newmode & GR_SUPPRESS) ++ return GR_SUPPRESS; ++ else ++ return 0; ++} ++ ++int ++gr_check_hidden_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW)) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY) || !task)) ++ return 0; ++ ++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ task->acl != current->acl) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) ++{ ++ struct task_struct *p; ++ int ret = 0; ++ ++ if (unlikely(!(gr_status & GR_READY) || !pid)) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ p->acl != current->acl) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ ++ return ret; ++} ++ ++void ++gr_copy_label(struct task_struct *tsk) ++{ ++ /* plain copying of 
fields is already done by dup_task_struct */ ++ tsk->signal->used_accept = 0; ++ tsk->acl_sp_role = 0; ++ //tsk->acl_role_id = current->acl_role_id; ++ //tsk->acl = current->acl; ++ //tsk->role = current->role; ++ tsk->signal->curr_ip = current->signal->curr_ip; ++ tsk->signal->saved_ip = current->signal->saved_ip; ++ if (current->exec_file) ++ get_file(current->exec_file); ++ //tsk->exec_file = current->exec_file; ++ //tsk->is_writable = current->is_writable; ++ if (unlikely(current->signal->used_accept)) { ++ current->signal->curr_ip = 0; ++ current->signal->saved_ip = 0; ++ } ++ ++ return; ++} ++ ++static void ++gr_set_proc_res(struct task_struct *task) ++{ ++ struct acl_subject_label *proc; ++ unsigned short i; ++ ++ proc = task->acl; ++ ++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return; ++ ++ for (i = 0; i < RLIM_NLIMITS; i++) { ++ if (!(proc->resmask & (1 << i))) ++ continue; ++ ++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur; ++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max; ++ } ++ ++ return; ++} ++ ++extern int __gr_process_user_ban(struct user_struct *user); ++ ++int ++gr_check_user_change(int real, int effective, int fs) ++{ ++ unsigned int i; ++ __u16 num; ++ uid_t *uidlist; ++ int curuid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++ struct user_struct *user; ++ ++ if (real == -1) ++ goto skipit; ++ ++ user = find_user(real); ++ if (user == NULL) ++ goto skipit; ++ ++ if (__gr_process_user_ban(user)) { ++ /* for find_user */ ++ free_uid(user); ++ return 1; ++ } ++ ++ /* for find_user */ ++ free_uid(user); ++ ++skipit: ++#endif ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ gr_log_learn_id_change('u', real, effective, fs); ++ ++ num = current->acl->user_trans_num; ++ uidlist = current->acl->user_transitions; ++ ++ if (uidlist == NULL) ++ return 0; ++ ++ if (real == -1) ++ realok = 1; ++ if (effective == -1) ++ effectiveok = 1; ++ if (fs == -1) ++ fsok = 1; ++ ++ if (current->acl->user_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curuid = (int)uidlist[i]; ++ if (real == curuid) ++ realok = 1; ++ if (effective == curuid) ++ effectiveok = 1; ++ if (fs == curuid) ++ fsok = 1; ++ } ++ } else if (current->acl->user_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curuid = (int)uidlist[i]; ++ if (real == curuid) ++ break; ++ if (effective == curuid) ++ break; ++ if (fs == curuid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 
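++ /*
++  * [Editorial note, not part of the original patch.] The nested ternary
++  * spanning this note logs the first ID that failed the transition
++  * check; an equivalent long form would be:
++  *
++  *     int offender;
++  *     if (!realok)
++  *         offender = real;
++  *     else if (!effectiveok)
++  *         offender = effective;
++  *     else
++  *         offender = fs;
++  */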
0 : fs) : effective) : real); ++ return 1; ++ } ++} ++ ++int ++gr_check_group_change(int real, int effective, int fs) ++{ ++ unsigned int i; ++ __u16 num; ++ gid_t *gidlist; ++ int curgid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ gr_log_learn_id_change('g', real, effective, fs); ++ ++ num = current->acl->group_trans_num; ++ gidlist = current->acl->group_transitions; ++ ++ if (gidlist == NULL) ++ return 0; ++ ++ if (real == -1) ++ realok = 1; ++ if (effective == -1) ++ effectiveok = 1; ++ if (fs == -1) ++ fsok = 1; ++ ++ if (current->acl->group_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curgid = (int)gidlist[i]; ++ if (real == curgid) ++ realok = 1; ++ if (effective == curgid) ++ effectiveok = 1; ++ if (fs == curgid) ++ fsok = 1; ++ } ++ } else if (current->acl->group_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curgid = (int)gidlist[i]; ++ if (real == curgid) ++ break; ++ if (effective == curgid) ++ break; ++ if (fs == curgid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real); ++ return 1; ++ } ++} ++ ++extern int gr_acl_is_capable(const int cap); ++ ++void ++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid) ++{ ++ struct acl_role_label *role = task->role; ++ struct acl_subject_label *subj = NULL; ++ struct acl_object_label *obj; ++ struct file *filp; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ filp = task->exec_file; ++ ++ /* kernel process, we'll give them the kernel role */ ++ if (unlikely(!filp)) { ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++ return; ++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) ++ role = lookup_acl_role_label(task, uid, gid); ++ ++ /* don't change the role if we're not a privileged process */ ++ if (role && task->role != role && ++ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) || ++ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID)))) ++ return; ++ ++ /* perform subject lookup in possibly new role ++ we can use this result below in the case where role == task->role ++ */ ++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role); ++ ++ /* if we changed uid/gid, but result in the same role ++ and are using inheritance, don't lose the inherited subject ++ if current subject is other than what normal lookup ++ would result in, we arrived via inheritance, don't ++ lose subject ++ */ ++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) && ++ (subj == task->acl))) ++ task->acl = subj; ++ ++ task->role = role; ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, 
task->acl->filename); ++#endif ++ ++ gr_set_proc_res(task); ++ ++ return; ++} ++ ++int ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, ++ const int unsafe_flags) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *newacl; ++ struct acl_object_label *obj; ++ __u32 retmode; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ newacl = chk_subj_label(dentry, mnt, task->role); ++ ++ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then ++ did an exec ++ */ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) || ++ (task->parent->acl->mode & GR_POVERRIDE))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ goto skip_check; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) && ++ !(task->role->roletype & GR_ROLE_GOD) && ++ !gr_search_file(dentry, GR_PTRACERD, mnt) && ++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { ++ if (unsafe_flags & LSM_UNSAFE_SHARE) ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt); ++ else ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); ++ return -EACCES; ++ } ++ ++skip_check: ++ ++ obj = chk_obj_label(dentry, mnt, task->acl); ++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); ++ ++ if (!(task->acl->mode & GR_INHERITLEARN) && ++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { ++ if (obj->nested) ++ task->acl = obj->nested; ++ else ++ task->acl = newacl; ++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) ++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt); ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(dentry, mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(dentry, mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ return 0; ++} ++ ++/* always called with valid inodev ptr */ ++static void ++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev) ++{ ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *matchps; ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) ++ matchpo->mode |= GR_DELETED; ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ if (subj->inode == ino && subj->device == dev) ++ subj->mode |= GR_DELETED; ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL) ++ matchps->mode |= GR_DELETED; ++ FOR_EACH_ROLE_END(role) ++ ++ inodev->nentry->deleted = 1; ++ ++ return; ++} ++ ++void ++gr_handle_delete(const ino_t ino, const dev_t dev) ++{ ++ struct inodev_entry *inodev; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ write_lock(&gr_inode_lock); ++ inodev = lookup_inodev_entry(ino, dev); ++ if (inodev != NULL) ++ do_handle_delete(inodev, 
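++ /*
++  * [Editorial note, not part of the original patch.] Deletion does not
++  * drop policy state: do_handle_delete() above only tags every matching
++  * object and subject with GR_DELETED and marks the inodev entry
++  * deleted. If a file is later re-created at a pathname the policy
++  * knows, the update_* helpers below clear the flag and rehash the entry
++  * under the new inode and device, so rules survive delete and replace
++  * cycles.
++  */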
ino, dev); ++ write_unlock(&gr_inode_lock); ++ ++ return; ++} ++ ++static void ++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ subj->obj_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ insert_acl_obj_label(match, subj); ++ } ++ ++ return; ++} ++ ++static void ++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_role_label *role) ++{ ++ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ role->subj_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ insert_acl_subj_label(match, role); ++ } ++ ++ return; ++} ++ ++static void ++update_inodev_entry(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice) ++{ ++ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size); ++ struct inodev_entry *match; ++ ++ match = inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != oldinode || ++ match->nentry->device != olddevice || !match->nentry->deleted)) ++ match = match->next; ++ ++ if (match && (match->nentry->inode == oldinode) ++ && (match->nentry->device == olddevice) && ++ match->nentry->deleted) { ++ if (match->prev == NULL) { ++ inodev_set.i_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->nentry->inode = newinode; ++ match->nentry->device = newdevice; ++ match->nentry->deleted = 0; ++ ++ insert_inodev_entry(match); ++ } ++ ++ return; ++} ++ ++static void ++__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev) ++{ ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ update_acl_subj_label(matchn->inode, matchn->device, ++ inode, dev, role); ++ ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ if ((subj->inode == inode) && (subj->device == dev)) { ++ subj->inode = inode; ++ subj->device 
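++ /*
++  * [Editorial note, not part of the original patch.] The three update_*
++  * helpers above share one doubly linked hash chain idiom: locate the
++  * GR_DELETED entry, unlink it whether it sits at the chain head or in
++  * the interior, rewrite its inode and device to the new file, clear
++  * GR_DELETED, and rehash it. In outline:
++  *
++  *     if (match->prev == NULL)
++  *         head[index] = match->next;
++  *     else
++  *         match->prev->next = match->next;
++  *     if (match->next != NULL)
++  *         match->next->prev = match->prev;
++  */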
= dev; ++ } ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ update_acl_obj_label(matchn->inode, matchn->device, ++ inode, dev, subj); ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_ROLE_END(role) ++ ++ update_inodev_entry(matchn->inode, matchn->device, inode, dev); ++ ++ return; ++} ++ ++static void ++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, ++ const struct vfsmount *mnt) ++{ ++ ino_t ino = dentry->d_inode->i_ino; ++ dev_t dev = __get_dev(dentry); ++ ++ __do_handle_create(matchn, ino, dev); ++ ++ return; ++} ++ ++void ++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ struct name_entry *matchn; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt)); ++ ++ if (unlikely((unsigned long)matchn)) { ++ write_lock(&gr_inode_lock); ++ do_handle_create(matchn, dentry, mnt); ++ write_unlock(&gr_inode_lock); ++ } ++ preempt_enable(); ++ ++ return; ++} ++ ++void ++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) ++{ ++ struct name_entry *matchn; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt)); ++ ++ if (unlikely((unsigned long)matchn)) { ++ write_lock(&gr_inode_lock); ++ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev); ++ write_unlock(&gr_inode_lock); ++ } ++ preempt_enable(); ++ ++ return; ++} ++ ++void ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace) ++{ ++ struct name_entry *matchn; ++ struct inodev_entry *inodev; ++ struct inode *inode = new_dentry->d_inode; ++ ino_t oldinode = old_dentry->d_inode->i_ino; ++ dev_t olddev = __get_dev(old_dentry); ++ ++ /* vfs_rename swaps the name and parent link for old_dentry and ++ new_dentry ++ at this point, old_dentry has the new name, parent link, and inode ++ for the renamed file ++ if a file is being replaced by a rename, new_dentry has the inode ++ and name for the replaced file ++ */ ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt)); ++ ++ /* we wouldn't have to check d_inode if it weren't for ++ NFS silly-renaming ++ */ ++ ++ write_lock(&gr_inode_lock); ++ if (unlikely(replace && inode)) { ++ ino_t newinode = inode->i_ino; ++ dev_t newdev = __get_dev(new_dentry); ++ inodev = lookup_inodev_entry(newinode, newdev); ++ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode))) ++ do_handle_delete(inodev, newinode, newdev); ++ } ++ ++ inodev = lookup_inodev_entry(oldinode, olddev); ++ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode))) ++ do_handle_delete(inodev, oldinode, olddev); ++ ++ if (unlikely((unsigned long)matchn)) ++ do_handle_create(matchn, old_dentry, mnt); ++ ++ write_unlock(&gr_inode_lock); ++ preempt_enable(); ++ ++ return; ++} ++ ++static int ++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt, ++ unsigned char **sum) ++{ ++ struct acl_role_label *r; ++ struct role_allowed_ip *ipp; ++ struct role_transition *trans; ++ unsigned int i; ++ int found = 0; ++ u32 curr_ip = current->signal->curr_ip; ++ ++ current->signal->saved_ip = curr_ip; ++ ++ /* check transition table */ ++ ++ for (trans = 
current->role->transitions; trans; trans = trans->next) { ++ if (!strcmp(rolename, trans->rolename)) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) ++ return 0; ++ ++ /* handle special roles that do not require authentication ++ and check ip */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ found = 0; ++ if (r->allowed_ips != NULL) { ++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { ++ if ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask)) ++ found = 1; ++ } ++ } else ++ found = 2; ++ if (!found) ++ return 0; ++ ++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) || ++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) { ++ *salt = NULL; ++ *sum = NULL; ++ return 1; ++ } ++ } ++ FOR_EACH_ROLE_END(r) ++ ++ for (i = 0; i < num_sprole_pws; i++) { ++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) { ++ *salt = acl_special_roles[i]->salt; ++ *sum = acl_special_roles[i]->sum; ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++assign_special_role(char *rolename) ++{ ++ struct acl_object_label *obj; ++ struct acl_role_label *r; ++ struct acl_role_label *assigned = NULL; ++ struct task_struct *tsk; ++ struct file *filp; ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ assigned = r; ++ break; ++ } ++ FOR_EACH_ROLE_END(r) ++ ++ if (!assigned) ++ return; ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ ++ tsk = current->real_parent; ++ if (tsk == NULL) ++ goto out_unlock; ++ ++ filp = tsk->exec_file; ++ if (filp == NULL) ++ goto out_unlock; ++ ++ tsk->is_writable = 0; ++ ++ tsk->acl_sp_role = 1; ++ tsk->acl_role_id = ++acl_sp_role_value; ++ tsk->role = assigned; ++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role); ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid); ++#endif ++ ++out_unlock: ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return; ++} ++ ++int gr_check_secure_terminal(struct task_struct *task) ++{ ++ struct task_struct *p, *p2, *p3; ++ struct files_struct *files; ++ struct fdtable *fdt; ++ struct file *our_file = NULL, *file; ++ int i; ++ ++ if (task->signal->tty == NULL) ++ return 1; ++ ++ files = get_files_struct(task); ++ if (files != NULL) { ++ rcu_read_lock(); ++ fdt = files_fdtable(files); ++ for (i=0; i < fdt->max_fds; i++) { ++ file = fcheck_files(files, i); ++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) { ++ get_file(file); ++ our_file = file; ++ } ++ } ++ rcu_read_unlock(); ++ put_files_struct(files); ++ } ++ ++ if (our_file == NULL) ++ return 1; ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(p2, p) { ++ files = get_files_struct(p); ++ if (files == NULL || ++ (p->signal && p->signal->tty == task->signal->tty)) { ++ if (files != NULL) ++ put_files_struct(files); ++ continue; ++ } ++ rcu_read_lock(); ++ fdt = files_fdtable(files); ++ for (i=0; i < 
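++ /*
++  * [Editorial note, not part of the original patch.] This second scan is
++  * the heart of the secure terminal check: for every other process
++  * holding open a character device with the same rdev as our controlling
++  * tty, the p3 walk below asks whether that process is an ancestor of
++  * the authenticating task. A holder that is not an ancestor could sniff
++  * the terminal, so the attempt is logged, the offender is handled by
++  * gr_handle_alertkill(), and authentication is refused by returning 0.
++  */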
fdt->max_fds; i++) { ++ file = fcheck_files(files, i); ++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) && ++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) { ++ p3 = task; ++ while (p3->pid > 0) { ++ if (p3 == p) ++ break; ++ p3 = p3->real_parent; ++ } ++ if (p3 == p) ++ break; ++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p); ++ gr_handle_alertkill(p); ++ rcu_read_unlock(); ++ put_files_struct(files); ++ read_unlock(&tasklist_lock); ++ fput(our_file); ++ return 0; ++ } ++ } ++ rcu_read_unlock(); ++ put_files_struct(files); ++ } while_each_thread(p2, p); ++ read_unlock(&tasklist_lock); ++ ++ fput(our_file); ++ return 1; ++} ++ ++ssize_t ++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos) ++{ ++ struct gr_arg_wrapper uwrap; ++ unsigned char *sprole_salt = NULL; ++ unsigned char *sprole_sum = NULL; ++ int error = sizeof (struct gr_arg_wrapper); ++ int error2 = 0; ++ ++ mutex_lock(&gr_dev_mutex); ++ ++ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) { ++ error = -EPERM; ++ goto out; ++ } ++ ++ if (count != sizeof (struct gr_arg_wrapper)) { ++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper)); ++ error = -EINVAL; ++ goto out; ++ } ++ ++ ++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) { ++ gr_auth_expires = 0; ++ gr_auth_attempts = 0; ++ } ++ ++ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) { ++ error = -EFAULT; ++ goto out; ++ } ++ ++ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) { ++ error = -EINVAL; ++ goto out; ++ } ++ ++ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) { ++ error = -EFAULT; ++ goto out; ++ } ++ ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM && ++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && ++ time_after(gr_auth_expires, get_seconds())) { ++ error = -EBUSY; ++ goto out; ++ } ++ ++ /* if non-root trying to do anything other than use a special role, ++ do not attempt authentication, do not count towards authentication ++ locking ++ */ ++ ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS && ++ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM && ++ current_uid()) { ++ error = -EPERM; ++ goto out; ++ } ++ ++ /* ensure pw and special role name are null terminated */ ++ ++ gr_usermode->pw[GR_PW_LEN - 1] = '\0'; ++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; ++ ++ /* Okay. ++ * We now have enough of the argument structure (we have yet ++ * to copy_from_user the tables themselves). Copy the tables ++ * only if we need them, i.e. for loading operations.
*/ ++ ++ switch (gr_usermode->mode) { ++ case GR_STATUS: ++ if (gr_status & GR_READY) { ++ error = 1; ++ if (!gr_check_secure_terminal(current)) ++ error = 3; ++ } else ++ error = 2; ++ goto out; ++ case GR_SHUTDOWN: ++ if ((gr_status & GR_READY) ++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ pax_open_kernel(); ++ gr_status &= ~GR_READY; ++ pax_close_kernel(); ++ ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); ++ free_variables(); ++ memset(gr_usermode, 0, sizeof (struct gr_arg)); ++ memset(gr_system_salt, 0, GR_SALT_LEN); ++ memset(gr_system_sum, 0, GR_SHA_LEN); ++ } else if (gr_status & GR_READY) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); ++ error = -EPERM; ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); ++ error = -EAGAIN; ++ } ++ break; ++ case GR_ENABLE: ++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode))) ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); ++ else { ++ if (gr_status & GR_READY) ++ error = -EAGAIN; ++ else ++ error = error2; ++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); ++ } ++ break; ++ case GR_RELOAD: ++ if (!(gr_status & GR_READY)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); ++ error = -EAGAIN; ++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ lock_kernel(); ++ ++ pax_open_kernel(); ++ gr_status &= ~GR_READY; ++ pax_close_kernel(); ++ ++ free_variables(); ++ if (!(error2 = gracl_init(gr_usermode))) { ++ unlock_kernel(); ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); ++ } else { ++ unlock_kernel(); ++ error = error2; ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ } ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ error = -EPERM; ++ } ++ break; ++ case GR_SEGVMOD: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); ++ if (gr_usermode->segv_device && gr_usermode->segv_inode) { ++ struct acl_subject_label *segvacl; ++ segvacl = ++ lookup_acl_subj_label(gr_usermode->segv_inode, ++ gr_usermode->segv_device, ++ current->role); ++ if (segvacl) { ++ segvacl->crashes = 0; ++ segvacl->expires = 0; ++ } ++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { ++ gr_remove_uid(gr_usermode->segv_uid); ++ } ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); ++ error = -EPERM; ++ } ++ break; ++ case GR_SPROLE: ++ case GR_SPROLEPAM: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) { ++ current->role->expires = 0; ++ current->role->auth_attempts = 0; ++ } ++ ++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && ++ time_after(current->role->expires, get_seconds())) { ++ error = -EBUSY; ++ goto out; ++ } ++ ++ if (lookup_special_role_auth ++ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum) ++ && ((!sprole_salt && !sprole_sum) ++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { ++ char *p = ""; ++ assign_special_role(gr_usermode->sp_role); ++ read_lock(&tasklist_lock); ++ if (current->real_parent) ++ p = current->real_parent->role->rolename; ++ read_unlock(&tasklist_lock); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG, ++ 
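++ /*
++  * [Editorial sketch, not part of the original patch.] Userspace drives
++  * this whole handler with a single write of a struct gr_arg_wrapper.
++  * Based only on the field usage above, the calling side looks roughly
++  * like:
++  *
++  *     struct gr_arg arg;
++  *     arg.mode = GR_SPROLE;      (plus password, role name, etc.)
++  *     struct gr_arg_wrapper w = {
++  *         .version = GRSECURITY_VERSION,
++  *         .size    = sizeof (struct gr_arg),
++  *         .arg     = &arg,
++  *     };
++  *     write(fd, &w, sizeof (w));
++  *
++  * with fd open on the grsecurity control device, whose path is not
++  * shown in this hunk. Failed password checks feed the
++  * CONFIG_GRKERNSEC_ACL_MAXTRIES / CONFIG_GRKERNSEC_ACL_TIMEOUT lockout
++  * applied at the end of the function.
++  */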
p, acl_sp_role_value); ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); ++ error = -EPERM; ++ if(!(current->role->auth_attempts++)) ++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ goto out; ++ } ++ break; ++ case GR_UNSPROLE: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->roletype & GR_ROLE_SPECIAL) { ++ char *p = ""; ++ int i = 0; ++ ++ read_lock(&tasklist_lock); ++ if (current->real_parent) { ++ p = current->real_parent->role->rolename; ++ i = current->real_parent->acl_role_id; ++ } ++ read_unlock(&tasklist_lock); ++ ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i); ++ gr_set_acls(1); ++ } else { ++ error = -EPERM; ++ goto out; ++ } ++ break; ++ default: ++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); ++ error = -EINVAL; ++ break; ++ } ++ ++ if (error != -EPERM) ++ goto out; ++ ++ if(!(gr_auth_attempts++)) ++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ out: ++ mutex_unlock(&gr_dev_mutex); ++ return error; ++} ++ ++/* must be called with ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++*/ ++int gr_apply_subject_to_task(struct task_struct *task) ++{ ++ struct acl_object_label *obj; ++ char *tmpname; ++ struct acl_subject_label *tmpsubj; ++ struct file *filp; ++ struct name_entry *nmatch; ++ ++ filp = task->exec_file; ++ if (filp == NULL) ++ return 0; ++ ++ /* the following is to apply the correct subject ++ on binaries running when the RBAC system ++ is enabled, when the binaries have been ++ replaced or deleted since their execution ++ ----- ++ when the RBAC system starts, the inode/dev ++ from exec_file will be one the RBAC system ++ is unaware of. It only knows the inode/dev ++ of the present file on disk, or the absence ++ of it. 
++ */ ++ preempt_disable(); ++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt); ++ ++ nmatch = lookup_name_entry(tmpname); ++ preempt_enable(); ++ tmpsubj = NULL; ++ if (nmatch) { ++ if (nmatch->deleted) ++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role); ++ else ++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role); ++ if (tmpsubj != NULL) ++ task->acl = tmpsubj; ++ } ++ if (tmpsubj == NULL) ++ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, ++ task->role); ++ if (task->acl) { ++ task->is_writable = 0; ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ } else { ++ return 1; ++ } ++ ++ return 0; ++} ++ ++int ++gr_set_acls(const int type) ++{ ++ struct task_struct *task, *task2; ++ struct acl_role_label *role = current->role; ++ __u16 acl_role_id = current->acl_role_id; ++ const struct cred *cred; ++ int ret; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ do_each_thread(task2, task) { ++ /* check to see if we're called from the exit handler, ++ if so, only replace ACLs that have inherited the admin ++ ACL */ ++ ++ if (type && (task->role != role || ++ task->acl_role_id != acl_role_id)) ++ continue; ++ ++ task->acl_role_id = 0; ++ task->acl_sp_role = 0; ++ ++ if (task->exec_file) { ++ cred = __task_cred(task); ++ task->role = lookup_acl_role_label(task, cred->uid, cred->gid); ++ ++ ret = gr_apply_subject_to_task(task); ++ if (ret) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid); ++ return ret; ++ } ++ } else { ++ // it's a kernel process ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN ++ task->acl->mode &= ~GR_PROCFIND; ++#endif ++ } ++ } while_each_thread(task2, task); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 0; ++} ++ ++void ++gr_learn_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ struct acl_subject_label *acl; ++ const struct cred *cred; ++ ++ if (unlikely((gr_status & GR_READY) && ++ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) ++ goto skip_reslog; ++ ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ gr_log_resource(task, res, wanted, gt); ++#endif ++ skip_reslog: ++ ++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS)) ++ return; ++ ++ acl = task->acl; ++ ++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || ++ !(acl->resmask & (1 << (unsigned short) res)))) ++ return; ++ ++ if (wanted >= acl->res[res].rlim_cur) { ++ unsigned long res_add; ++ ++ res_add = wanted; ++ switch (res) { ++ case RLIMIT_CPU: ++ res_add += GR_RLIM_CPU_BUMP; ++ break; ++ case RLIMIT_FSIZE: ++ res_add += GR_RLIM_FSIZE_BUMP; ++ break; ++ case RLIMIT_DATA: ++ res_add += 
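++ /*
++  * [Editorial note, not part of the original patch.] In learning mode an
++  * overrun of the currently learned soft limit is not an error; instead
++  * the subject's limit is raised to the wanted value plus a per resource
++  * headroom constant (the GR_RLIM_*_BUMP values in this switch), and the
++  * new ceiling is emitted to the learn log so that generated policy
++  * keeps slack above the highest observed usage.
++  */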
GR_RLIM_DATA_BUMP; ++ break; ++ case RLIMIT_STACK: ++ res_add += GR_RLIM_STACK_BUMP; ++ break; ++ case RLIMIT_CORE: ++ res_add += GR_RLIM_CORE_BUMP; ++ break; ++ case RLIMIT_RSS: ++ res_add += GR_RLIM_RSS_BUMP; ++ break; ++ case RLIMIT_NPROC: ++ res_add += GR_RLIM_NPROC_BUMP; ++ break; ++ case RLIMIT_NOFILE: ++ res_add += GR_RLIM_NOFILE_BUMP; ++ break; ++ case RLIMIT_MEMLOCK: ++ res_add += GR_RLIM_MEMLOCK_BUMP; ++ break; ++ case RLIMIT_AS: ++ res_add += GR_RLIM_AS_BUMP; ++ break; ++ case RLIMIT_LOCKS: ++ res_add += GR_RLIM_LOCKS_BUMP; ++ break; ++ case RLIMIT_SIGPENDING: ++ res_add += GR_RLIM_SIGPENDING_BUMP; ++ break; ++ case RLIMIT_MSGQUEUE: ++ res_add += GR_RLIM_MSGQUEUE_BUMP; ++ break; ++ case RLIMIT_NICE: ++ res_add += GR_RLIM_NICE_BUMP; ++ break; ++ case RLIMIT_RTPRIO: ++ res_add += GR_RLIM_RTPRIO_BUMP; ++ break; ++ case RLIMIT_RTTIME: ++ res_add += GR_RLIM_RTTIME_BUMP; ++ break; ++ } ++ ++ acl->res[res].rlim_cur = res_add; ++ ++ if (wanted > acl->res[res].rlim_max) ++ acl->res[res].rlim_max = res_add; ++ ++ /* only log the subject filename, since resource logging is supported for ++ single-subject learning only */ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, cred->uid, cred->gid, acl->filename, ++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max, ++ "", (unsigned long) res, &task->signal->saved_ip); ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++ ++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)) ++void ++pax_set_initial_flags(struct linux_binprm *bprm) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *proc; ++ unsigned long flags; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ flags = pax_get_flags(task); ++ ++ proc = task->acl; ++ ++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC) ++ flags &= ~MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC) ++ flags &= ~MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP) ++ flags &= ~MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP) ++ flags &= ~MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT) ++ flags &= ~MF_PAX_MPROTECT; ++ ++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC) ++ flags |= MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC) ++ flags |= MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP) ++ flags |= MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP) ++ flags |= MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT) ++ flags |= MF_PAX_MPROTECT; ++ ++ pax_set_flags(task, flags); ++ ++ return; ++} ++#endif ++ ++#ifdef CONFIG_SYSCTL ++/* Eric Biederman likes breaking userland ABI and every inode-based security ++ system to save 35kb of memory */ ++ ++/* we modify the passed in filename, but adjust it back before returning */ ++static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len) ++{ ++ struct name_entry *nmatch; ++ char *p, *lastp = NULL; ++ struct acl_object_label *obj = NULL, *tmp; ++ struct acl_subject_label *tmpsubj; ++ char c = '\0'; ++ ++ read_lock(&gr_inode_lock); ++ ++ p = name + len - 1; ++ do { ++ nmatch = lookup_name_entry(name); ++ if (lastp != NULL) ++ *lastp = c; ++ ++ if (nmatch == NULL) ++ goto next_component; ++ tmpsubj = current->acl; ++ do { ++ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj); ++ if (obj != NULL) { ++ tmp = obj->globbed; ++ while (tmp) { ++ if 
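++ /*
++  * [Editorial note, not part of the original patch.] gr_lookup_by_name()
++  * probes the sysctl pathname from most to least specific by temporarily
++  * writing a NUL over each '/' from the right: for
++  * "/proc/sys/kernel/modprobe" it tries the full path, then
++  * "/proc/sys/kernel", "/proc/sys", "/proc", and finally "/", restoring
++  * the clobbered byte (lastp and c) after every probe. Globbed objects
++  * attached to a match are tested against the restored full name, and
++  * the walk can safely end at "/" because a loaded policy always defines
++  * a root object (see the non-NULL comment below).
++  */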
(!glob_match(tmp->filename, name)) { ++ obj = tmp; ++ goto found_obj; ++ } ++ tmp = tmp->next; ++ } ++ goto found_obj; ++ } ++ } while ((tmpsubj = tmpsubj->parent_subject)); ++next_component: ++ /* end case */ ++ if (p == name) ++ break; ++ ++ while (*p != '/') ++ p--; ++ if (p == name) ++ lastp = p + 1; ++ else { ++ lastp = p; ++ p--; ++ } ++ c = *lastp; ++ *lastp = '\0'; ++ } while (1); ++found_obj: ++ read_unlock(&gr_inode_lock); ++ /* obj returned will always be non-null */ ++ return obj; ++} ++ ++/* returns 0 when allowing, non-zero on error ++ op of 0 is used for readdir, so we don't log the names of hidden files ++*/ ++__u32 ++gr_handle_sysctl(const struct ctl_table *table, const int op) ++{ ++ ctl_table *tmp; ++ const char *proc_sys = "/proc/sys"; ++ char *path; ++ struct acl_object_label *obj; ++ unsigned short len = 0, pos = 0, depth = 0, i; ++ __u32 err = 0; ++ __u32 mode = 0; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ /* for now, ignore operations on non-sysctl entries if it's not a ++ readdir*/ ++ if (table->child != NULL && op != 0) ++ return 0; ++ ++ mode |= GR_FIND; ++ /* it's only a read if it's an entry, read on dirs is for readdir */ ++ if (op & MAY_READ) ++ mode |= GR_READ; ++ if (op & MAY_WRITE) ++ mode |= GR_WRITE; ++ ++ preempt_disable(); ++ ++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); ++ ++ /* it's only a read/write if it's an actual entry, not a dir ++ (which are opened for readdir) ++ */ ++ ++ /* convert the requested sysctl entry into a pathname */ ++ ++ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) { ++ len += strlen(tmp->procname); ++ len++; ++ depth++; ++ } ++ ++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) { ++ /* deny */ ++ goto out; ++ } ++ ++ memset(path, 0, PAGE_SIZE); ++ ++ memcpy(path, proc_sys, strlen(proc_sys)); ++ ++ pos += strlen(proc_sys); ++ ++ for (; depth > 0; depth--) { ++ path[pos] = '/'; ++ pos++; ++ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) { ++ if (depth == i) { ++ memcpy(path + pos, tmp->procname, ++ strlen(tmp->procname)); ++ pos += strlen(tmp->procname); ++ } ++ i++; ++ } ++ } ++ ++ obj = gr_lookup_by_name(path, pos); ++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS); ++ ++ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) && ++ ((err & mode) != mode))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ err = 0; ++ gr_log_learn_sysctl(path, new_mode); ++ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) { ++ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path); ++ err = -ENOENT; ++ } else if (!(err & GR_FIND)) { ++ err = -ENOENT; ++ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) { ++ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied", ++ path, (mode & GR_READ) ? " reading" : "", ++ (mode & GR_WRITE) ? " writing" : ""); ++ err = -EACCES; ++ } else if ((err & mode) != mode) { ++ err = -EACCES; ++ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) { ++ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful", ++ path, (mode & GR_READ) ? " reading" : "", ++ (mode & GR_WRITE) ? 
" writing" : ""); ++ err = 0; ++ } else ++ err = 0; ++ ++ out: ++ preempt_enable(); ++ ++ return err; ++} ++#endif ++ ++int ++gr_handle_proc_ptrace(struct task_struct *task) ++{ ++ struct file *filp; ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ filp = task->exec_file; ++ ++ while (tmp->pid > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->real_parent; ++ } ++ ++ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 1; ++ } ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 0; ++ } ++#endif ++ ++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ ++ if (retmode & GR_NOPTRACE) ++ return 1; ++ ++ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) ++ && (current->acl != task->acl || (current->acl != current->role->root_label ++ && current->pid != task->pid))) ++ return 1; ++ ++ return 0; ++} ++ ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ if (!(current->role->roletype & GR_ROLE_GOD)) ++ return; ++ ++ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n", ++ p->role->rolename, gr_task_roletype_to_char(p), ++ p->acl->filename); ++} ++ ++int ++gr_handle_ptrace(struct task_struct *task, const long request) ++{ ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ ++ if (request == PTRACE_ATTACH) { ++ read_lock(&tasklist_lock); ++ while (tmp->pid > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->real_parent; ++ } ++ ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { ++ read_unlock(&tasklist_lock); ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ read_unlock(&tasklist_lock); ++ } ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) ++ return 0; ++#endif ++ ++ read_lock(&grsec_exec_file_lock); ++ if (unlikely(!task->exec_file)) { ++ read_unlock(&grsec_exec_file_lock); ++ return 0; ++ } ++ ++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ ++ if (retmode & GR_NOPTRACE) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ if (retmode & GR_PTRACERD) { ++ switch (request) { ++ case PTRACE_POKETEXT: ++ case PTRACE_POKEDATA: ++ case PTRACE_POKEUSR: ++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) ++ case PTRACE_SETREGS: ++ case PTRACE_SETFPREGS: ++#endif ++#ifdef CONFIG_X86 ++ case PTRACE_SETFPXREGS: ++#endif ++#ifdef CONFIG_ALTIVEC ++ case PTRACE_SETVRREGS: ++#endif ++ return 1; ++ default: ++ return 0; ++ } ++ } else 
if (!(current->acl->mode & GR_POVERRIDE) && ++ !(current->role->roletype & GR_ROLE_GOD) && ++ (current->acl != task->acl)) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int is_writable_mmap(const struct file *filp) ++{ ++ struct task_struct *task = current; ++ struct acl_object_label *obj, *obj2; ++ ++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) && ++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) { ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, ++ task->role->root_label); ++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ ++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++int ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ ++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++void ++gr_acl_handle_psacct(struct task_struct *task, const long code) ++{ ++ unsigned long runtime; ++ unsigned long cputime; ++ unsigned int wday, cday; ++ __u8 whr, chr; ++ __u8 wmin, cmin; ++ __u8 wsec, csec; ++ struct timespec timeval; ++ ++ if (unlikely(!(gr_status & GR_READY) || !task->acl || ++ !(task->acl->mode & GR_PROCACCT))) ++ return; ++ ++ do_posix_clock_monotonic_gettime(&timeval); ++ runtime = timeval.tv_sec - task->start_time.tv_sec; ++ wday = runtime / (3600 * 24); ++ runtime -= wday * (3600 * 24); ++ whr = runtime / 3600; ++ runtime -= whr * 3600; ++ wmin = runtime / 60; ++ runtime -= wmin * 60; ++ wsec = runtime; ++ ++ cputime = (task->utime + task->stime) / HZ; ++ cday = cputime / (3600 * 24); ++ cputime -= cday * (3600 * 24); ++ chr = cputime / 3600; ++ cputime -= chr * 3600; ++ cmin = cputime / 60; ++ cputime -= cmin * 60; ++ csec = cputime; ++ ++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, 
wsec, cday, chr, cmin, csec, code); ++ ++ return; ++} ++ ++void gr_set_kernel_label(struct task_struct *task) ++{ ++ if (gr_status & GR_READY) { ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++ } ++ return; ++} ++ ++#ifdef CONFIG_TASKSTATS ++int gr_is_taskstats_denied(int pid) ++{ ++ struct task_struct *task; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred; ++#endif ++ int ret = 0; ++ ++ /* restrict taskstats viewing to un-chrooted root users ++ who have the 'view' subject flag if the RBAC system is enabled ++ */ ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ task = find_task_by_vpid(pid); ++ if (task) { ++#ifdef CONFIG_GRKERNSEC_CHROOT ++ if (proc_is_chrooted(task)) ++ ret = -EACCES; ++#endif ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ cred = __task_cred(task); ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (cred->uid != 0) ++ ret = -EACCES; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID)) ++ ret = -EACCES; ++#endif ++#endif ++ if (gr_status & GR_READY) { ++ if (!(task->acl->mode & GR_VIEW)) ++ ret = -EACCES; ++ } ++ } else ++ ret = -ENOENT; ++ ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return ret; ++} ++#endif ++ ++/* AUXV entries are filled via a descendant of search_binary_handler ++ after we've already applied the subject for the target ++*/ ++int gr_acl_enable_at_secure(void) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & GR_ATSECURE) ++ return 1; ++ ++ return 0; ++} ++ ++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino) ++{ ++ struct task_struct *task = current; ++ struct dentry *dentry = file->f_path.dentry; ++ struct vfsmount *mnt = file->f_path.mnt; ++ struct acl_object_label *obj, *tmp; ++ struct acl_subject_label *subj; ++ unsigned int bufsize; ++ int is_not_root; ++ char *path; ++ dev_t dev = __get_dev(dentry); ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 1; ++ ++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return 1; ++ ++ /* ignore Eric Biederman */ ++ if (IS_PRIVATE(dentry->d_inode)) ++ return 1; ++ ++ subj = task->acl; ++ do { ++ obj = lookup_acl_obj_label(ino, dev, subj); ++ if (obj != NULL) ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ } while ((subj = subj->parent_subject)); ++ ++ /* this is purely an optimization since we're looking for an object ++ for the directory we're doing a readdir on ++ if it's possible for any globbed object to match the entry we're ++ filling into the directory, then the object we find here will be ++ an anchor point with attached globbed objects ++ */ ++ obj = chk_obj_label_noglob(dentry, mnt, task->acl); ++ if (obj->globbed == NULL) ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ ++ is_not_root = ((obj->filename[0] == '/') && ++ (obj->filename[1] == '\0')) ? 
0 : 1; ++ bufsize = PAGE_SIZE - namelen - is_not_root; ++ ++ /* check bufsize > PAGE_SIZE || bufsize == 0 */ ++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1))) ++ return 1; ++ ++ preempt_disable(); ++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ bufsize); ++ ++ bufsize = strlen(path); ++ ++ /* if base is "/", don't append an additional slash */ ++ if (is_not_root) ++ *(path + bufsize) = '/'; ++ memcpy(path + bufsize + is_not_root, name, namelen); ++ *(path + bufsize + namelen + is_not_root) = '\0'; ++ ++ tmp = obj->globbed; ++ while (tmp) { ++ if (!glob_match(tmp->filename, path)) { ++ preempt_enable(); ++ return (tmp->mode & GR_FIND) ? 1 : 0; ++ } ++ tmp = tmp->next; ++ } ++ preempt_enable(); ++ return (obj->mode & GR_FIND) ? 1 : 0; ++} ++ ++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE ++EXPORT_SYMBOL(gr_acl_is_enabled); ++#endif ++EXPORT_SYMBOL(gr_learn_resource); ++EXPORT_SYMBOL(gr_set_kernel_label); ++#ifdef CONFIG_SECURITY ++EXPORT_SYMBOL(gr_check_user_change); ++EXPORT_SYMBOL(gr_check_group_change); ++#endif ++ +diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c +new file mode 100644 +index 0000000..34fefda +--- /dev/null ++++ b/grsecurity/gracl_alloc.c +@@ -0,0 +1,105 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++ ++static unsigned long alloc_stack_next = 1; ++static unsigned long alloc_stack_size = 1; ++static void **alloc_stack; ++ ++static __inline__ int ++alloc_pop(void) ++{ ++ if (alloc_stack_next == 1) ++ return 0; ++ ++ kfree(alloc_stack[alloc_stack_next - 2]); ++ ++ alloc_stack_next--; ++ ++ return 1; ++} ++ ++static __inline__ int ++alloc_push(void *buf) ++{ ++ if (alloc_stack_next >= alloc_stack_size) ++ return 1; ++ ++ alloc_stack[alloc_stack_next - 1] = buf; ++ ++ alloc_stack_next++; ++ ++ return 0; ++} ++ ++void * ++acl_alloc(unsigned long len) ++{ ++ void *ret = NULL; ++ ++ if (!len || len > PAGE_SIZE) ++ goto out; ++ ++ ret = kmalloc(len, GFP_KERNEL); ++ ++ if (ret) { ++ if (alloc_push(ret)) { ++ kfree(ret); ++ ret = NULL; ++ } ++ } ++ ++out: ++ return ret; ++} ++ ++void * ++acl_alloc_num(unsigned long num, unsigned long len) ++{ ++ if (!len || (num > (PAGE_SIZE / len))) ++ return NULL; ++ ++ return acl_alloc(num * len); ++} ++ ++void ++acl_free_all(void) ++{ ++ if (gr_acl_is_enabled() || !alloc_stack) ++ return; ++ ++ while (alloc_pop()) ; ++ ++ if (alloc_stack) { ++ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE) ++ kfree(alloc_stack); ++ else ++ vfree(alloc_stack); ++ } ++ ++ alloc_stack = NULL; ++ alloc_stack_size = 1; ++ alloc_stack_next = 1; ++ ++ return; ++} ++ ++int ++acl_alloc_stack_init(unsigned long size) ++{ ++ if ((size * sizeof (void *)) <= PAGE_SIZE) ++ alloc_stack = ++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL); ++ else ++ alloc_stack = (void **) vmalloc(size * sizeof (void *)); ++ ++ alloc_stack_size = size; ++ ++ if (!alloc_stack) ++ return 0; ++ else ++ return 1; ++} +diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c +new file mode 100644 +index 0000000..955ddfb +--- /dev/null ++++ b/grsecurity/gracl_cap.c +@@ -0,0 +1,101 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++extern const char *captab_log[]; ++extern int captab_log_entries; ++ ++int ++gr_acl_is_capable(const int cap) ++{ ++ struct task_struct *task = 
current; ++ const struct cred *cred = current_cred(); ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ kernel_cap_t cap_audit = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = task->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ cap_audit = curracl->cap_invert_audit; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ if (cap_raised(curracl->cap_invert_audit, cap)) ++ cap_raise(cap_audit, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) { ++ if (cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]); ++ return 1; ++ } ++ ++ curracl = task->acl; ++ ++ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ && cap_raised(cred->cap_effective, cap)) { ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, cred->uid, ++ cred->gid, task->exec_file ? ++ gr_to_filename(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : curracl->filename, ++ curracl->filename, 0UL, ++ 0UL, "", (unsigned long) cap, &task->signal->saved_ip); ++ return 1; ++ } ++ ++ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); ++ return 0; ++} ++ ++int ++gr_acl_is_capable_nolog(const int cap) ++{ ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = current->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) ++ return 1; ++ ++ return 0; ++} ++ +diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c +new file mode 100644 +index 0000000..8c4595a +--- /dev/null ++++ b/grsecurity/gracl_fs.c +@@ -0,0 +1,435 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/types.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/stat.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/gracl.h> ++ ++umode_t ++gr_acl_umask(void) ++{ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ return current->role->umask; ++} ++ ++__u32 ++gr_acl_handle_hidden_file(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ __u32 mode; ++ ++ if (unlikely(!dentry->d_inode)) ++ return GR_FIND; ++ ++ mode = ++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | 
GR_SUPPRESS, mnt); ++ ++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return mode; ++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_FIND))) ++ return 0; ++ ++ return GR_FIND; ++} ++ ++__u32 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, ++ int acc_mode) ++{ ++ __u32 reqmode = GR_FIND; ++ __u32 mode; ++ ++ if (unlikely(!dentry->d_inode)) ++ return reqmode; ++ ++ if (acc_mode & MAY_APPEND) ++ reqmode |= GR_APPEND; ++ else if (acc_mode & MAY_WRITE) ++ reqmode |= GR_WRITE; ++ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode)) ++ reqmode |= GR_READ; ++ ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_creat(const struct dentry * dentry, ++ const struct dentry * p_dentry, ++ const struct vfsmount * p_mnt, int open_flags, int acc_mode, ++ const int imode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ if (acc_mode & MAY_APPEND) ++ reqmode |= GR_APPEND; ++ // if a directory was required or the directory already exists, then ++ // don't count this open as a read ++ if ((acc_mode & MAY_READ) && ++ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)))) ++ reqmode |= GR_READ; ++ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID))) ++ reqmode |= GR_SETID; ++ ++ mode = ++ gr_check_create(dentry, p_dentry, p_mnt, ++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? 
" appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, ++ const int fmode) ++{ ++ __u32 mode, reqmode = GR_FIND; ++ ++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode)) ++ reqmode |= GR_EXEC; ++ if (fmode & S_IWOTH) ++ reqmode |= GR_WRITE; ++ if (fmode & S_IROTH) ++ reqmode |= GR_READ; ++ ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? " executing" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? " executing" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, ++ umode_t *modeptr) ++{ ++ umode_t mode; ++ ++ *modeptr &= ~gr_acl_umask(); ++ mode = *modeptr; ++ ++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode))) ++ return 1; ++ ++ if (unlikely(mode & (S_ISUID | S_ISGID))) { ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, ++ GR_CHMOD_ACL_MSG); ++ } else { ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG); ++ } ++} ++ ++__u32 ++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, 
mnt, GR_EXEC, GR_EXEC_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, ++ GR_UNIXCONNECT_ACL_MSG); ++} ++ ++/* hardlinks require at minimum create and link permission, ++ any additional privilege required is based on the ++ privilege of the file being linked to ++*/ ++__u32 ++gr_acl_handle_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, ++ const struct vfsmount * old_mnt, const char *to) ++{ ++ __u32 mode; ++ __u32 needmode = GR_CREATE | GR_LINK; ++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; ++ ++ mode = ++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry, ++ old_mnt); ++ ++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_symlink(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, const char *from) ++{ ++ __u32 needmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ mode = ++ gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_CREATE | GR_AUDIT_CREATE | ++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); ++ ++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { ++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return (GR_WRITE | GR_CREATE); ++} ++ ++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_mknod(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const int mode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ if (unlikely(mode & (S_ISUID | S_ISGID))) ++ reqmode |= GR_SETID; ++ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ reqmode, GR_MKNOD_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt) ++{ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); ++} ++ ++#define RENAME_CHECK_SUCCESS(old, new) \ ++ (((old & 
(GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ ++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) ++ ++int ++gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const char *newname) ++{ ++ __u32 comp1, comp2; ++ int error = 0; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (!new_dentry->d_inode) { ++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | ++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); ++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | GR_AUDIT_WRITE | ++ GR_SUPPRESS, old_mnt); ++ } else { ++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | ++ GR_CREATE | GR_DELETE | ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | GR_AUDIT_WRITE | ++ GR_SUPPRESS, parent_mnt); ++ comp2 = ++ gr_search_file(old_dentry, ++ GR_READ | GR_WRITE | GR_AUDIT_READ | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); ++ } ++ ++ if (RENAME_CHECK_SUCCESS(comp1, comp2) && ++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); ++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) ++ && !(comp2 & GR_SUPPRESS)) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); ++ error = -EACCES; ++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) ++ error = -EACCES; ++ ++ return error; ++} ++ ++void ++gr_acl_handle_exit(void) ++{ ++ u16 id; ++ char *rolename; ++ struct file *exec_file; ++ ++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() && ++ !(current->role->roletype & GR_ROLE_PERSIST))) { ++ id = current->acl_role_id; ++ rolename = current->role->rolename; ++ gr_set_acls(1); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); ++ } ++ ++ write_lock(&grsec_exec_file_lock); ++ exec_file = current->exec_file; ++ current->exec_file = NULL; ++ write_unlock(&grsec_exec_file_lock); ++ ++ if (exec_file) ++ fput(exec_file); ++} ++ ++int ++gr_acl_handle_procpidmem(const struct task_struct *task) ++{ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (task != current && task->acl->mode & GR_PROTPROCFD) ++ return -EACCES; ++ ++ return 0; ++} +diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c +new file mode 100644 +index 0000000..dd925aa +--- /dev/null ++++ b/grsecurity/gracl_ip.c +@@ -0,0 +1,385 @@ ++#include <linux/kernel.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/skbuff.h> ++#include <linux/ip.h> ++#include <linux/udp.h> ++#include <linux/smp_lock.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/netdevice.h> ++#include <linux/inetdevice.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++#define GR_BIND 0x01 ++#define GR_CONNECT 0x02 ++#define GR_INVERT 0x04 ++#define GR_BINDOVERRIDE 0x08 ++#define GR_CONNECTOVERRIDE 0x10 ++#define GR_SOCK_FAMILY 0x20 ++ ++static const char * gr_protocols[IPPROTO_MAX] = { ++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", ++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", ++ "chaos", "udp", "mux", "dcn", 
"hmp", "prm", "xns-idp", "trunk-1", ++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", ++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", ++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", ++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", ++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63", ++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv", ++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", ++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", ++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", ++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", ++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip", ++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp", ++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", ++ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", ++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", ++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", ++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", ++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", ++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", ++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", ++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", ++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", ++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", ++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", ++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223", ++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", ++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", ++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", ++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", ++ }; ++ ++static const char * gr_socktypes[SOCK_MAX] = { ++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", ++ "unknown:7", "unknown:8", "unknown:9", "packet" ++ }; ++ ++static const char * gr_sockfamilies[AF_MAX+1] = { ++ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25", ++ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash", ++ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28", ++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", 
"ieee802154" ++ }; ++ ++const char * ++gr_proto_to_name(unsigned char proto) ++{ ++ return gr_protocols[proto]; ++} ++ ++const char * ++gr_socktype_to_name(unsigned char type) ++{ ++ return gr_socktypes[type]; ++} ++ ++const char * ++gr_sockfamily_to_name(unsigned char family) ++{ ++ return gr_sockfamilies[family]; ++} ++ ++int ++gr_search_socket(const int domain, const int type, const int protocol) ++{ ++ struct acl_subject_label *curr; ++ const struct cred *cred = current_cred(); ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ goto exit; ++ ++ if ((domain < 0) || (type < 0) || (protocol < 0) || ++ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX)) ++ goto exit; // let the kernel handle it ++ ++ curr = current->acl; ++ ++ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) { ++ /* the family is allowed, if this is PF_INET allow it only if ++ the extra sock type/protocol checks pass */ ++ if (domain == PF_INET) ++ goto inet_check; ++ goto exit; ++ } else { ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? ++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, domain, 0, 0, GR_SOCK_FAMILY, ++ ¤t->signal->saved_ip); ++ goto exit; ++ } ++ goto exit_fail; ++ } ++ ++inet_check: ++ /* the rest of this checking is for IPv4 only */ ++ if (!curr->ips) ++ goto exit; ++ ++ if ((curr->ip_type & (1 << type)) && ++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32)))) ++ goto exit; ++ ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ /* we don't place acls on raw sockets , and sometimes ++ dgram/ip sockets are opened for ioctl and not ++ bind/connect, so we'll fake a bind learn log */ ++ if (type == SOCK_RAW || type == SOCK_PACKET) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? ++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, 0, type, ++ protocol, GR_CONNECT, ¤t->signal->saved_ip); ++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? 
++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, 0, type, ++ protocol, GR_BIND, &current->signal->saved_ip); ++ } ++ /* we'll log when they use connect or bind */ ++ goto exit; ++ } ++ ++exit_fail: ++ if (domain == PF_INET) ++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), ++ gr_socktype_to_name(type), gr_proto_to_name(protocol)); ++ else ++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain), ++ gr_socktype_to_name(type), protocol); ++ ++ return 0; ++exit: ++ return 1; ++} ++ ++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask) ++{ ++ if ((ip->mode & mode) && ++ (ip_port >= ip->low) && ++ (ip_port <= ip->high) && ++ ((ntohl(ip_addr) & our_netmask) == ++ (ntohl(our_addr) & our_netmask)) ++ && (ip->proto[protocol / 32] & (1 << (protocol % 32))) ++ && (ip->type & (1 << type))) { ++ if (ip->mode & GR_INVERT) ++ return 2; // specifically denied ++ else ++ return 1; // allowed ++ } ++ ++ return 0; // not specifically allowed, may continue parsing ++} ++ ++static int ++gr_search_connectbind(const int full_mode, struct sock *sk, ++ struct sockaddr_in *addr, const int type) ++{ ++ char iface[IFNAMSIZ] = {0}; ++ struct acl_subject_label *curr; ++ struct acl_ip_label *ip; ++ struct inet_sock *isk; ++ struct net_device *dev; ++ struct in_device *idev; ++ unsigned long i; ++ int ret; ++ int mode = full_mode & (GR_BIND | GR_CONNECT); ++ __u32 ip_addr = 0; ++ __u32 our_addr; ++ __u32 our_netmask; ++ char *p; ++ __u16 ip_port = 0; ++ const struct cred *cred = current_cred(); ++ ++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET)) ++ return 0; ++ ++ curr = current->acl; ++ isk = inet_sk(sk); ++ ++ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */ ++ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) ++ addr->sin_addr.s_addr = curr->inaddr_any_override; ++ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) { ++ struct sockaddr_in saddr; ++ int err; ++ ++ saddr.sin_family = AF_INET; ++ saddr.sin_addr.s_addr = curr->inaddr_any_override; ++ saddr.sin_port = isk->sport; ++ ++ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); ++ if (err) ++ return err; ++ ++ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); ++ if (err) ++ return err; ++ } ++ ++ if (!curr->ips) ++ return 0; ++ ++ ip_addr = addr->sin_addr.s_addr; ++ ip_port = ntohs(addr->sin_port); ++ ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? 
++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &ip_addr, ip_port, type, ++ sk->sk_protocol, mode, &current->signal->saved_ip); ++ return 0; ++ } ++ ++ for (i = 0; i < curr->ip_num; i++) { ++ ip = *(curr->ips + i); ++ if (ip->iface != NULL) { ++ strncpy(iface, ip->iface, IFNAMSIZ - 1); ++ p = strchr(iface, ':'); ++ if (p != NULL) ++ *p = '\0'; ++ dev = dev_get_by_name(sock_net(sk), iface); ++ if (dev == NULL) ++ continue; ++ idev = in_dev_get(dev); ++ if (idev == NULL) { ++ dev_put(dev); ++ continue; ++ } ++ rcu_read_lock(); ++ for_ifa(idev) { ++ if (!strcmp(ip->iface, ifa->ifa_label)) { ++ our_addr = ifa->ifa_address; ++ our_netmask = 0xffffffff; ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); ++ if (ret == 1) { ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ return 0; ++ } else if (ret == 2) { ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ goto denied; ++ } ++ } ++ } endfor_ifa(idev); ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ } else { ++ our_addr = ip->addr; ++ our_netmask = ip->netmask; ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); ++ if (ret == 1) ++ return 0; ++ else if (ret == 2) ++ goto denied; ++ } ++ } ++ ++denied: ++ if (mode == GR_BIND) ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); ++ else if (mode == GR_CONNECT) ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); ++ ++ return -EACCES; ++} ++ ++int ++gr_search_connect(struct socket *sock, struct sockaddr_in *addr) ++{ ++ /* always allow disconnection of dgram sockets with connect */ ++ if (addr->sin_family == AF_UNSPEC) ++ return 0; ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type); ++} ++ ++int ++gr_search_bind(struct socket *sock, struct sockaddr_in *addr) ++{ ++ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type); ++} ++ ++int gr_search_listen(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ struct sockaddr_in addr; ++ ++ addr.sin_addr.s_addr = inet_sk(sk)->saddr; ++ addr.sin_port = inet_sk(sk)->sport; ++ ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); ++} ++ ++int gr_search_accept(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ struct sockaddr_in addr; ++ ++ addr.sin_addr.s_addr = inet_sk(sk)->saddr; ++ addr.sin_port = inet_sk(sk)->sport; ++ ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); ++} ++ ++int ++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr) ++{ ++ if (addr) ++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM); ++ else { ++ struct sockaddr_in sin; ++ const struct inet_sock *inet = inet_sk(sk); ++ ++ sin.sin_addr.s_addr = inet->daddr; ++ sin.sin_port = inet->dport; ++ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); ++ } ++} ++ ++int ++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb) ++{ ++ struct sockaddr_in sin; ++ ++ if (unlikely(skb->len < sizeof (struct udphdr))) ++ return 0; // skip this packet ++ ++ sin.sin_addr.s_addr = ip_hdr(skb)->saddr; ++ sin.sin_port = udp_hdr(skb)->source; ++ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, 
sk, &sin, SOCK_DGRAM); ++} +diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c +new file mode 100644 +index 0000000..34bdd46 +--- /dev/null ++++ b/grsecurity/gracl_learn.c +@@ -0,0 +1,208 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/poll.h> ++#include <linux/smp_lock.h> ++#include <linux/string.h> ++#include <linux/file.h> ++#include <linux/types.h> ++#include <linux/vmalloc.h> ++#include <linux/grinternal.h> ++ ++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf, ++ size_t count, loff_t *ppos); ++extern int gr_acl_is_enabled(void); ++ ++static DECLARE_WAIT_QUEUE_HEAD(learn_wait); ++static int gr_learn_attached; ++ ++/* use a 512k buffer */ ++#define LEARN_BUFFER_SIZE (512 * 1024) ++ ++static DEFINE_SPINLOCK(gr_learn_lock); ++static DEFINE_MUTEX(gr_learn_user_mutex); ++ ++/* we need to maintain two buffers, so that the kernel context of grlearn ++ uses a mutex around the userspace copying, and the other kernel contexts ++ use a spinlock when copying into the buffer, since they cannot sleep ++*/ ++static char *learn_buffer; ++static char *learn_buffer_user; ++static int learn_buffer_len; ++static int learn_buffer_user_len; ++ ++static ssize_t ++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos) ++{ ++ DECLARE_WAITQUEUE(wait, current); ++ ssize_t retval = 0; ++ ++ add_wait_queue(&learn_wait, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ do { ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ if (learn_buffer_len) ++ break; ++ spin_unlock(&gr_learn_lock); ++ mutex_unlock(&gr_learn_user_mutex); ++ if (file->f_flags & O_NONBLOCK) { ++ retval = -EAGAIN; ++ goto out; ++ } ++ if (signal_pending(current)) { ++ retval = -ERESTARTSYS; ++ goto out; ++ } ++ ++ schedule(); ++ } while (1); ++ ++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len); ++ learn_buffer_user_len = learn_buffer_len; ++ retval = learn_buffer_len; ++ learn_buffer_len = 0; ++ ++ spin_unlock(&gr_learn_lock); ++ ++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len)) ++ retval = -EFAULT; ++ ++ mutex_unlock(&gr_learn_user_mutex); ++out: ++ set_current_state(TASK_RUNNING); ++ remove_wait_queue(&learn_wait, &wait); ++ return retval; ++} ++ ++static unsigned int ++poll_learn(struct file * file, poll_table * wait) ++{ ++ poll_wait(file, &learn_wait, wait); ++ ++ if (learn_buffer_len) ++ return (POLLIN | POLLRDNORM); ++ ++ return 0; ++} ++ ++void ++gr_clear_learn_entries(void) ++{ ++ char *tmp; ++ ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ if (tmp) ++ vfree(tmp); ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ mutex_unlock(&gr_learn_user_mutex); ++ ++ return; ++} ++ ++void ++gr_add_learn_entry(const char *fmt, ...) 
++{ ++ va_list args; ++ unsigned int len; ++ ++ if (!gr_learn_attached) ++ return; ++ ++ spin_lock(&gr_learn_lock); ++ ++ /* leave a gap at the end so we know when it's "full" but don't have to ++ compute the exact length of the string we're trying to append ++ */ ++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ return; ++ } ++ if (learn_buffer == NULL) { ++ spin_unlock(&gr_learn_lock); ++ return; ++ } ++ ++ va_start(args, fmt); ++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); ++ va_end(args); ++ ++ learn_buffer_len += len + 1; ++ ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ ++ return; ++} ++ ++static int ++open_learn(struct inode *inode, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ && gr_learn_attached) ++ return -EBUSY; ++ if (file->f_mode & FMODE_READ) { ++ int retval = 0; ++ mutex_lock(&gr_learn_user_mutex); ++ if (learn_buffer == NULL) ++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer_user == NULL) ++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ if (learn_buffer_user == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 1; ++out_error: ++ mutex_unlock(&gr_learn_user_mutex); ++ return retval; ++ } ++ return 0; ++} ++ ++static int ++close_learn(struct inode *inode, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ) { ++ char *tmp = NULL; ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ if (tmp) ++ vfree(tmp); ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 0; ++ mutex_unlock(&gr_learn_user_mutex); ++ } ++ ++ return 0; ++} ++ ++const struct file_operations grsec_fops = { ++ .read = read_learn, ++ .write = write_grsec_handler, ++ .open = open_learn, ++ .release = close_learn, ++ .poll = poll_learn, ++}; +diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c +new file mode 100644 +index 0000000..70b2179 +--- /dev/null ++++ b/grsecurity/gracl_res.c +@@ -0,0 +1,67 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grinternal.h> ++ ++static const char *restab_log[] = { ++ [RLIMIT_CPU] = "RLIMIT_CPU", ++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE", ++ [RLIMIT_DATA] = "RLIMIT_DATA", ++ [RLIMIT_STACK] = "RLIMIT_STACK", ++ [RLIMIT_CORE] = "RLIMIT_CORE", ++ [RLIMIT_RSS] = "RLIMIT_RSS", ++ [RLIMIT_NPROC] = "RLIMIT_NPROC", ++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE", ++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK", ++ [RLIMIT_AS] = "RLIMIT_AS", ++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS", ++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING", ++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE", ++ [RLIMIT_NICE] = "RLIMIT_NICE", ++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO", ++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME", ++ [GR_CRASH_RES] = "RLIMIT_CRASH" ++}; ++ ++void ++gr_log_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ const struct cred *cred; ++ unsigned long rlim; ++ ++ if (!gr_acl_is_enabled() && !grsec_resource_logging) ++ return; ++ ++ // not yet supported resource ++ if (unlikely(!restab_log[res])) ++ return; ++ ++ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME) ++ rlim 
= task->signal->rlim[res].rlim_max; ++ else ++ rlim = task->signal->rlim[res].rlim_cur; ++ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim))) ++ return; ++ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ ++ if (res == RLIMIT_NPROC && ++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || ++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE))) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_MEMLOCK && ++ cap_raised(cred->cap_effective, CAP_IPC_LOCK)) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE)) ++ goto out_rcu_unlock; ++ rcu_read_unlock(); ++ ++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim); ++ ++ return; ++out_rcu_unlock: ++ rcu_read_unlock(); ++ return; ++} +diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c +new file mode 100644 +index 0000000..1d1b734 +--- /dev/null ++++ b/grsecurity/gracl_segv.c +@@ -0,0 +1,284 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/smp_lock.h> ++#include <linux/slab.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/timer.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++static struct crash_uid *uid_set; ++static unsigned short uid_used; ++static DEFINE_SPINLOCK(gr_uid_lock); ++extern rwlock_t gr_inode_lock; ++extern struct acl_subject_label * ++ lookup_acl_subj_label(const ino_t inode, const dev_t dev, ++ struct acl_role_label *role); ++extern int gr_fake_force_sig(int sig, struct task_struct *t); ++ ++int ++gr_init_uidset(void) ++{ ++ uid_set = ++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); ++ uid_used = 0; ++ ++ return uid_set ? 
1 : 0; ++} ++ ++void ++gr_free_uidset(void) ++{ ++ if (uid_set) ++ kfree(uid_set); ++ ++ return; ++} ++ ++int ++gr_find_uid(const uid_t uid) ++{ ++ struct crash_uid *tmp = uid_set; ++ uid_t buid; ++ int low = 0, high = uid_used - 1, mid; ++ ++ while (high >= low) { ++ mid = (low + high) >> 1; ++ buid = tmp[mid].uid; ++ if (buid == uid) ++ return mid; ++ if (buid > uid) ++ high = mid - 1; ++ if (buid < uid) ++ low = mid + 1; ++ } ++ ++ return -1; ++} ++ ++static __inline__ void ++gr_insertsort(void) ++{ ++ unsigned short i, j; ++ struct crash_uid index; ++ ++ for (i = 1; i < uid_used; i++) { ++ index = uid_set[i]; ++ j = i; ++ while ((j > 0) && uid_set[j - 1].uid > index.uid) { ++ uid_set[j] = uid_set[j - 1]; ++ j--; ++ } ++ uid_set[j] = index; ++ } ++ ++ return; ++} ++ ++static __inline__ void ++gr_insert_uid(const uid_t uid, const unsigned long expires) ++{ ++ int loc; ++ ++ if (uid_used == GR_UIDTABLE_MAX) ++ return; ++ ++ loc = gr_find_uid(uid); ++ ++ if (loc >= 0) { ++ uid_set[loc].expires = expires; ++ return; ++ } ++ ++ uid_set[uid_used].uid = uid; ++ uid_set[uid_used].expires = expires; ++ uid_used++; ++ ++ gr_insertsort(); ++ ++ return; ++} ++ ++void ++gr_remove_uid(const unsigned short loc) ++{ ++ unsigned short i; ++ ++ for (i = loc + 1; i < uid_used; i++) ++ uid_set[i - 1] = uid_set[i]; ++ ++ uid_used--; ++ ++ return; ++} ++ ++int ++gr_check_crash_uid(const uid_t uid) ++{ ++ int loc; ++ int ret = 0; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ spin_lock(&gr_uid_lock); ++ loc = gr_find_uid(uid); ++ ++ if (loc < 0) ++ goto out_unlock; ++ ++ if (time_before_eq(uid_set[loc].expires, get_seconds())) ++ gr_remove_uid(loc); ++ else ++ ret = 1; ++ ++out_unlock: ++ spin_unlock(&gr_uid_lock); ++ return ret; ++} ++ ++static __inline__ int ++proc_is_setxid(const struct cred *cred) ++{ ++ if (cred->uid != cred->euid || cred->uid != cred->suid || ++ cred->uid != cred->fsuid) ++ return 1; ++ if (cred->gid != cred->egid || cred->gid != cred->sgid || ++ cred->gid != cred->fsgid) ++ return 1; ++ ++ return 0; ++} ++ ++void ++gr_handle_crash(struct task_struct *task, const int sig) ++{ ++ struct acl_subject_label *curr; ++ struct task_struct *tsk, *tsk2; ++ const struct cred *cred; ++ const struct cred *cred2; ++ ++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) ++ return; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curr = task->acl; ++ ++ if (!(curr->resmask & (1 << GR_CRASH_RES))) ++ return; ++ ++ if (time_before_eq(curr->expires, get_seconds())) { ++ curr->expires = 0; ++ curr->crashes = 0; ++ } ++ ++ curr->crashes++; ++ ++ if (!curr->expires) ++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ time_after(curr->expires, get_seconds())) { ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ if (cred->uid && proc_is_setxid(cred)) { ++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); ++ spin_lock(&gr_uid_lock); ++ gr_insert_uid(cred->uid, curr->expires); ++ spin_unlock(&gr_uid_lock); ++ curr->expires = 0; ++ curr->crashes = 0; ++ read_lock(&tasklist_lock); ++ do_each_thread(tsk2, tsk) { ++ cred2 = __task_cred(tsk); ++ if (tsk != task && cred2->uid == cred->uid) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&tasklist_lock); ++ } else { ++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); ++ read_lock(&tasklist_lock); ++ 
read_lock(&grsec_exec_file_lock); ++ do_each_thread(tsk2, tsk) { ++ if (likely(tsk != task)) { ++ // if this thread has the same subject as the one that triggered ++ // RES_CRASH and it's the same binary, kill it ++ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ } ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++ ++int ++gr_check_crash_exec(const struct file *filp) ++{ ++ struct acl_subject_label *curr; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ read_lock(&gr_inode_lock); ++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino, ++ filp->f_path.dentry->d_inode->i_sb->s_dev, ++ current->role); ++ read_unlock(&gr_inode_lock); ++ ++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) || ++ (!curr->crashes && !curr->expires)) ++ return 0; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ time_after(curr->expires, get_seconds())) ++ return 1; ++ else if (time_before_eq(curr->expires, get_seconds())) { ++ curr->crashes = 0; ++ curr->expires = 0; ++ } ++ ++ return 0; ++} ++ ++void ++gr_handle_alertkill(struct task_struct *task) ++{ ++ struct acl_subject_label *curracl; ++ __u32 curr_ip; ++ struct task_struct *p, *p2; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curracl = task->acl; ++ curr_ip = task->signal->curr_ip; ++ ++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) { ++ read_lock(&tasklist_lock); ++ do_each_thread(p2, p) { ++ if (p->signal->curr_ip == curr_ip) ++ gr_fake_force_sig(SIGKILL, p); ++ } while_each_thread(p2, p); ++ read_unlock(&tasklist_lock); ++ } else if (curracl->mode & GR_KILLPROC) ++ gr_fake_force_sig(SIGKILL, task); ++ ++ return; ++} +diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c +new file mode 100644 +index 0000000..9d83a69 +--- /dev/null ++++ b/grsecurity/gracl_shm.c +@@ -0,0 +1,40 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/ipc.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const uid_t cuid, const int shmid) ++{ ++ struct task_struct *task; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ task = find_task_by_vpid(shm_cprid); ++ ++ if (unlikely(!task)) ++ task = find_task_by_vpid(shm_lapid); ++ ++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) || ++ (task->pid == shm_lapid)) && ++ (task->acl->mode & GR_PROTSHM) && ++ (task->acl != current->acl))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid); ++ return 0; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 1; ++} +diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c +new file mode 100644 +index 0000000..bc0be01 +--- /dev/null ++++ b/grsecurity/grsec_chdir.c +@@ -0,0 +1,19 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void ++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ if ((grsec_enable_chdir && grsec_enable_group && ++ 
in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && ++ !grsec_enable_group)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); ++ } ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c +new file mode 100644 +index 0000000..197bdd5 +--- /dev/null ++++ b/grsecurity/grsec_chroot.c +@@ -0,0 +1,386 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/mount.h> ++#include <linux/types.h> ++#include <linux/pid_namespace.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void gr_set_chroot_entries(struct task_struct *task, struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry && ++ path->dentry != task->nsproxy->mnt_ns->root->mnt_root) ++ task->gr_is_chrooted = 1; ++ else ++ task->gr_is_chrooted = 0; ++ ++ task->gr_chroot_dentry = path->dentry; ++#endif ++ return; ++} ++ ++void gr_clear_chroot_entries(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC ++ task->gr_is_chrooted = 0; ++ task->gr_chroot_dentry = NULL; ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_unix(const pid_t pid) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ struct task_struct *p; ++ ++ if (unlikely(!grsec_enable_chroot_unix)) ++ return 1; ++ ++ if (likely(!proc_is_chrooted(current))) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ p = find_task_by_vpid_unrestricted(pid); ++ if (unlikely(p && !have_same_root(current, p))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG); ++ return 0; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++#endif ++ return 1; ++} ++ ++int ++gr_handle_chroot_nice(void) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && (niceval < task_nice(p)) ++ && proc_is_chrooted(current)) { ++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_rawio(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) && ++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO)) ++ return 1; ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ struct task_struct *p; ++ int ret = 0; ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if (!have_same_root(current, p)) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ return ret; ++#endif ++ return 0; ++} ++ ++int ++gr_pid_is_chrooted(struct task_struct *p) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL) ++ return 0; ++ ++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) || ++ !have_same_root(current, p)) { ++ return 1; ++ } ++#endif ++ return 0; ++} ++ ++EXPORT_SYMBOL(gr_pid_is_chrooted); ++ ++#if 
defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR) ++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt) ++{ ++ struct dentry *dentry = (struct dentry *)u_dentry; ++ struct vfsmount *mnt = (struct vfsmount *)u_mnt; ++ struct dentry *realroot; ++ struct vfsmount *realrootmnt; ++ struct dentry *currentroot; ++ struct vfsmount *currentmnt; ++ struct task_struct *reaper = &init_task; ++ int ret = 1; ++ ++ read_lock(&reaper->fs->lock); ++ realrootmnt = mntget(reaper->fs->root.mnt); ++ realroot = dget(reaper->fs->root.dentry); ++ read_unlock(&reaper->fs->lock); ++ ++ read_lock(&current->fs->lock); ++ currentmnt = mntget(current->fs->root.mnt); ++ currentroot = dget(current->fs->root.dentry); ++ read_unlock(&current->fs->lock); ++ ++ spin_lock(&dcache_lock); ++ for (;;) { ++ if (unlikely((dentry == realroot && mnt == realrootmnt) ++ || (dentry == currentroot && mnt == currentmnt))) ++ break; ++ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ dentry = dentry->d_parent; ++ } ++ spin_unlock(&dcache_lock); ++ ++ dput(currentroot); ++ mntput(currentmnt); ++ ++ /* access is outside of chroot */ ++ if (dentry == realroot && mnt == realrootmnt) ++ ret = 0; ++ ++ dput(realroot); ++ mntput(realrootmnt); ++ return ret; ++} ++#endif ++ ++int ++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ if (!grsec_enable_chroot_fchdir) ++ return 1; ++ ++ if (!proc_is_chrooted(current)) ++ return 1; ++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt); ++ return 0; ++ } ++#endif ++ return 1; ++} ++ ++int ++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ struct task_struct *p; ++ time_t starttime; ++ ++ if (unlikely(!grsec_enable_chroot_shmat)) ++ return 1; ++ ++ if (likely(!proc_is_chrooted(current))) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) { ++ starttime = p->start_time.tv_sec; ++ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) { ++ if (have_same_root(current, p)) { ++ goto allow; ++ } else { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); ++ return 0; ++ } ++ } ++ /* creator exited, pid reuse, fall through to next check */ ++ } ++ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) { ++ if (unlikely(!have_same_root(current, p))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); ++ return 0; ++ } ++ } ++ ++allow: ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++#endif ++ return 1; ++} ++ ++void ++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current)) ++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt); ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_mknod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && ++ proc_is_chrooted(current)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, 
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++ !gr_is_outside_chroot(dentry, mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++extern const char *captab_log[];
++extern int captab_log_entries;
++
++int
++gr_chroot_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++ if (cap_raised(chroot_caps, cap)) {
++ const struct cred *creds = current_cred();
++ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
++ }
++ return 0;
++ }
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++ if (cap_raised(chroot_caps, cap)) {
++ return 0;
++ }
++ }
++#endif
++ return 1;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
++ && (op & MAY_WRITE))
++ return -EACCES;
++#endif
++ return 0;
++}
++
++void
++gr_handle_chroot_chdir(struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ if (grsec_enable_chroot_chdir)
++ set_fs_pwd(current->fs, path);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ /* allow chmod +s on directories, but not on files */
++ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
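The mode test in gr_handle_chroot_chmod() above is easy to misread: setuid always trips it, but setgid counts only together with group-exec, because setgid without S_IXGRP marks mandatory locking rather than privilege. A standalone restatement of just that predicate (illustrative helper, not from the patch):

#include <assert.h>
#include <sys/stat.h>

/* mirrors the chmod check above */
static int is_priv_escalating_mode(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	assert(is_priv_escalating_mode(04755));  /* setuid */
	assert(is_priv_escalating_mode(02755));  /* setgid executable */
	assert(!is_priv_escalating_mode(02644)); /* setgid, no group exec */
	assert(!is_priv_escalating_mode(0755));
	return 0;
}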
+diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
+new file mode 100644
+index 0000000..40545bf
+--- /dev/null
++++ b/grsecurity/grsec_disabled.c
+@@ -0,0 +1,437 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__u32
++gr_handle_sysctl(const struct ctl_table * table, const int op)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++ return 0;
++}
++#endif
++
++int
++gr_acl_is_enabled(void)
++{
++ return 0;
++}
++
++void
++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
++{
++ return;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++ return 0;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ return;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ return 0;
++}
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ return 0;
++}
++
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ return;
++}
++
++int
++gr_set_acls(const int type)
++{
++ return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++ return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++ return;
++}
++
++void
++gr_set_pax_flags(struct task_struct *task)
++{
++ return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++ const int unsafe_share)
++{
++ return 0;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ return 0;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ return 0;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ return;
++}
++
++int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++ return 1;
++}
++
++int
++gr_search_connectbind(const int mode, const struct socket *sock,
++ const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++ return;
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ int acc_mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++ unsigned int *vm_flags)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry,
++ const struct vfsmount * mnt, const int fmode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ umode_t *mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++void
++grsecurity_init(void)
++{
++ return;
++}
++
++umode_t gr_acl_umask(void)
++{
++ return 0;
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct inode *old_parent_inode,
++ const struct vfsmount *old_mnt, const char *newname)
++{
++ return 0;
++}
++
++int
++gr_acl_handle_filldir(const struct file *file, const char *name,
++ const int namelen, const ino_t ino)
++{
++ return 1;
++}
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ return 1;
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++int
++gr_search_accept(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_listen(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
++ const int imode)
++{
++ return 1;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ return;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ return 1;
++}
++
++void
++gr_set_role_label(const uid_t uid, const gid_t gid)
++{
++ return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ return 0;
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ return 0;
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++void
++gr_set_kernel_label(struct task_struct *task)
++{
++ return;
++}
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++int gr_acl_enable_at_secure(void)
++{
++ return 0;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++ return dentry->d_inode->i_sb->s_dev;
++}
++
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
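These stubs encode the return-value convention the rest of the kernel relies on when grsecurity is compiled out: gr_acl_handle_*() hooks return nonzero to permit an operation, while predicate hooks such as gr_handle_rawio() return 0 for "no objection". A standalone model of a call site (hypothetical, not kernel code):

#include <errno.h>
#include <stdio.h>

static int gr_acl_handle_open_stub(void) { return 1; } /* allow */
static int gr_handle_rawio_stub(void)    { return 0; } /* no objection */

static int do_open(void)
{
	if (!gr_acl_handle_open_stub())
		return -EACCES; /* never taken when disabled */
	if (gr_handle_rawio_stub())
		return -EPERM;  /* likewise */
	return 0;
}

int main(void)
{
	printf("open -> %d\n", do_open()); /* prints "open -> 0" */
	return 0;
}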
+diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
+new file mode 100644
+index 0000000..a96e155
+--- /dev/null
++++ b/grsecurity/grsec_exec.c
+@@ -0,0 +1,204 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/smp_lock.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++#include <linux/compat.h>
++#include <linux/module.h>
++
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++static char gr_exec_arg_buf[132];
++static DEFINE_MUTEX(gr_exec_arg_mutex);
++#endif
++
++void
++gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char *grarg = gr_exec_arg_buf;
++ unsigned int i, x, execlen = 0;
++ char c;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ mutex_lock(&gr_exec_arg_mutex);
++ memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++ if (unlikely(argv == NULL))
++ goto log;
++
++ for (i = 0; i < bprm->argc && execlen < 128; i++) {
++ const char __user *p;
++ unsigned int len;
++
++ if (copy_from_user(&p, argv + i, sizeof(p)))
++ goto log;
++ if (!p)
++ goto log;
++ len = strnlen_user(p, 128 - execlen);
++ if (len > 128 - execlen)
++ len = 128 - execlen;
++ else if (len > 0)
++ len--;
++ if (copy_from_user(grarg + execlen, p, len))
++ goto log;
++
++ /* rewrite unprintable characters */
++ for (x = 0; x < len; x++) {
++ c = *(grarg + execlen + x);
++ if (c < 32 || c > 126)
++ *(grarg + execlen + x) = ' ';
++ }
++
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++ bprm->file->f_path.mnt, grarg);
++ mutex_unlock(&gr_exec_arg_mutex);
++#endif
++ return;
++}
++
++#ifdef CONFIG_COMPAT
++void
++gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char *grarg = gr_exec_arg_buf;
++ unsigned int i, x, execlen = 0;
++ char c;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ mutex_lock(&gr_exec_arg_mutex);
++ memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++ if (unlikely(argv == NULL))
++ goto log;
++
++ for (i = 0; i < bprm->argc && execlen < 128; i++) {
++ compat_uptr_t p;
++ unsigned int len;
++
++ if (get_user(p, argv + i))
++ goto log;
++ len = strnlen_user(compat_ptr(p), 128 - execlen);
++ if (len > 128 - execlen)
++ len = 128 - execlen;
++ else if (len > 0)
++ len--;
++ else
++ goto log;
++ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
++ goto log;
++
++ /* rewrite unprintable characters */
++ for (x = 0; x < len; x++) {
++ c = *(grarg + execlen + x);
++ if (c < 32 || c > 126)
++ *(grarg + execlen + x) = ' ';
++ }
++
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++ bprm->file->f_path.mnt, grarg);
++ mutex_unlock(&gr_exec_arg_mutex);
++#endif
++ return;
++}
++#endif
++
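Both variants above build the same bounded log string: argv entries are copied space-separated into a fixed window, hard-capped at 128 bytes, with unprintable bytes rewritten so the result is safe in a log line. A userspace model of that construction (illustrative rewrite, not the kernel code):

#include <stdio.h>
#include <string.h>

static void build_exec_log(char *out, size_t cap, int argc, char **argv)
{
	size_t len = 0;
	memset(out, 0, cap);
	for (int i = 0; i < argc && len < cap - 4; i++) {
		size_t n = strnlen(argv[i], cap - 4 - len);
		for (size_t x = 0; x < n; x++) {
			char c = argv[i][x];
			/* rewrite unprintable characters */
			out[len + x] = (c < 32 || c > 126) ? ' ' : c;
		}
		len += n;
		out[len++] = ' ';
	}
}

int main(int argc, char **argv)
{
	char buf[132]; /* same size as gr_exec_arg_buf */
	build_exec_log(buf, sizeof(buf), argc, argv);
	printf("exec: %s\n", buf);
	return 0;
}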
++#ifdef CONFIG_GRKERNSEC
++extern int gr_acl_is_capable(const int cap);
++extern int gr_acl_is_capable_nolog(const int cap);
++extern int gr_chroot_is_capable(const int cap);
++extern int gr_chroot_is_capable_nolog(const int cap);
++#endif
++
++const char *captab_log[] = {
++ "CAP_CHOWN",
++ "CAP_DAC_OVERRIDE",
++ "CAP_DAC_READ_SEARCH",
++ "CAP_FOWNER",
++ "CAP_FSETID",
++ "CAP_KILL",
++ "CAP_SETGID",
++ "CAP_SETUID",
++ "CAP_SETPCAP",
++ "CAP_LINUX_IMMUTABLE",
++ "CAP_NET_BIND_SERVICE",
++ "CAP_NET_BROADCAST",
++ "CAP_NET_ADMIN",
++ "CAP_NET_RAW",
++ "CAP_IPC_LOCK",
++ "CAP_IPC_OWNER",
++ "CAP_SYS_MODULE",
++ "CAP_SYS_RAWIO",
++ "CAP_SYS_CHROOT",
++ "CAP_SYS_PTRACE",
++ "CAP_SYS_PACCT",
++ "CAP_SYS_ADMIN",
++ "CAP_SYS_BOOT",
++ "CAP_SYS_NICE",
++ "CAP_SYS_RESOURCE",
++ "CAP_SYS_TIME",
++ "CAP_SYS_TTY_CONFIG",
++ "CAP_MKNOD",
++ "CAP_LEASE",
++ "CAP_AUDIT_WRITE",
++ "CAP_AUDIT_CONTROL",
++ "CAP_SETFCAP",
++ "CAP_MAC_OVERRIDE",
++ "CAP_MAC_ADMIN"
++};
++
++int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
++
++int gr_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
++ return 1;
++ return 0;
++#else
++ return 1;
++#endif
++}
++
++int gr_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
++ return 1;
++ return 0;
++#else
++ return 1;
++#endif
++}
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
+diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
+new file mode 100644
+index 0000000..d3ee748
+--- /dev/null
++++ b/grsecurity/grsec_fifo.c
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
++ (cred->fsuid != dentry->d_inode->i_uid)) {
++ if (!inode_permission(dentry->d_inode, acc_mode))
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
+diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
+new file mode 100644
+index 0000000..8ca18bf
+--- /dev/null
++++ b/grsecurity/grsec_fork.c
+@@ -0,0 +1,23 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/errno.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
++ switch (retval) {
++ case -EAGAIN:
++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
++ break;
++ case -ENOMEM:
++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
++ break;
++ }
++ }
++#endif
++ return;
++}
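gr_handle_fifo() above refuses to open a FIFO sitting in a sticky directory when the FIFO belongs to neither the directory owner nor the opener, the classic /tmp data-spoofing setup; O_EXCL creations are exempt. The predicate in isolation (illustrative only, not from the patch):

#include <assert.h>
#include <sys/stat.h>

static int fifo_denied(mode_t f_mode, uid_t f_uid, mode_t dir_mode,
		       uid_t dir_uid, uid_t fsuid, int o_excl)
{
	return S_ISFIFO(f_mode) && !o_excl && (dir_mode & S_ISVTX) &&
	       f_uid != dir_uid && fsuid != f_uid;
}

int main(void)
{
	/* attacker (uid 1001) pre-created a FIFO in root's sticky /tmp */
	assert(fifo_denied(S_IFIFO | 0666, 1001, 01777, 0, 1000, 0));
	/* opening your own FIFO is fine */
	assert(!fifo_denied(S_IFIFO | 0666, 1000, 01777, 0, 1000, 0));
	return 0;
}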
+diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
+new file mode 100644
+index 0000000..1e995d3
+--- /dev/null
++++ b/grsecurity/grsec_init.c
+@@ -0,0 +1,278 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp_lock.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++int grsec_enable_ptrace_readexec;
++int grsec_enable_setxid;
++int grsec_enable_brute;
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_harden_ptrace;
++int grsec_enable_fifo;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_audit_ptrace;
++int grsec_enable_time;
++int grsec_enable_audit_textrel;
++int grsec_enable_group;
++int grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_mount;
++int grsec_enable_rofs;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++int grsec_tpe_gid;
++int grsec_enable_blackhole;
++#ifdef CONFIG_IPV6_MODULE
++EXPORT_SYMBOL(grsec_enable_blackhole);
++#endif
++int grsec_lastack_retries;
++int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
++int grsec_enable_socket_all;
++int grsec_socket_all_gid;
++int grsec_enable_socket_client;
++int grsec_socket_client_gid;
++int grsec_enable_socket_server;
++int grsec_socket_server_gid;
++int grsec_resource_logging;
++int grsec_disable_privio;
++int grsec_enable_log_rwxmaps;
++int grsec_lock;
++
++DEFINE_SPINLOCK(grsec_alert_lock);
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++DEFINE_SPINLOCK(grsec_audit_lock);
++
++DEFINE_RWLOCK(grsec_exec_file_lock);
++
++char *gr_shared_page[4];
++
++char *gr_alert_log_fmt;
++char *gr_audit_log_fmt;
++char *gr_alert_log_buf;
++char *gr_audit_log_buf;
++
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++
++void __init
++grsecurity_init(void)
++{
++ int j;
++ /* create the per-cpu shared pages */
++
++#ifdef CONFIG_X86
++ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
++#endif
++
++ for (j = 0; j < 4; j++) {
++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
++ if (gr_shared_page[j] == NULL) {
++ panic("Unable to allocate grsecurity shared page");
++ return;
++ }
++ }
++
++ /* allocate log buffers */
++ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_alert_log_fmt) {
++ panic("Unable to allocate grsecurity alert log format buffer");
++ return;
++ }
++ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_audit_log_fmt) {
++ panic("Unable to allocate grsecurity audit log format buffer");
++ return;
++ }
++ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_alert_log_buf) {
++ panic("Unable to allocate grsecurity alert log buffer");
++ return;
++ }
++ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_audit_log_buf) {
++ panic("Unable to allocate grsecurity audit log buffer");
++ return;
++ }
++
++ /* allocate memory for authentication structure */
++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++ panic("Unable to allocate grsecurity authentication structure");
++ return;
++ }
++
++
++#ifdef CONFIG_GRKERNSEC_IO
++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
++ grsec_disable_privio = 1;
++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++ grsec_disable_privio = 1;
++#else
++ grsec_disable_privio = 0;
++#endif
++#endif
++
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ /* for backward compatibility, tpe_invert always defaults to on if
++ enabled in the kernel
++ */
++ grsec_enable_tpe_invert = 1;
++#endif
++
++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++ grsec_lock = 1;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ grsec_enable_audit_textrel = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ grsec_enable_log_rwxmaps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ grsec_enable_group = 1;
++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ grsec_enable_harden_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ grsec_enable_brute = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ grsec_enable_blackhole = 1;
++ grsec_lastack_retries = 4;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++ grsec_enable_setxid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++ grsec_enable_ptrace_readexec = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ grsec_resource_logging = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++ grsec_enable_audit_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ grsec_enable_tpe = 1;
++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ grsec_enable_socket_all = 1;
++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ grsec_enable_socket_client = 1;
++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ grsec_enable_socket_server = 1;
++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++#endif
++#endif
++
++ return;
++}
+diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
+new file mode 100644
+index 0000000..3efe141
+--- /dev/null
++++ b/grsecurity/grsec_link.c
+@@ -0,0 +1,43 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
++ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode, const int mode, const char *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
++ (!S_ISREG(mode) || (mode & S_ISUID) ||
++ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
++ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
++ !capable(CAP_FOWNER) && cred->uid) {
++ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
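gr_handle_follow_link() above implements the well-known sticky-directory symlink defence: following is refused only when the link lives in a sticky, world-writable directory, is owned by someone else, and the follower does not own it either. Restated as a standalone predicate (illustrative only, not from the patch):

#include <assert.h>
#include <sys/stat.h>

static int symlink_denied(mode_t dir_mode, uid_t dir_uid,
			  uid_t link_uid, uid_t fsuid)
{
	return (dir_mode & S_ISVTX) && (dir_mode & S_IWOTH) &&
	       dir_uid != link_uid && fsuid != link_uid;
}

int main(void)
{
	/* root following an attacker-owned link in /tmp: denied */
	assert(symlink_denied(01777, 0, 1001, 0));
	/* following your own link is always allowed */
	assert(!symlink_denied(01777, 0, 1001, 1001));
	return 0;
}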
+diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
+new file mode 100644
+index 0000000..a45d2e9
+--- /dev/null
++++ b/grsecurity/grsec_log.c
+@@ -0,0 +1,322 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/tty.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_TREE_PREEMPT_RCU
++#define DISABLE_PREEMPT() preempt_disable()
++#define ENABLE_PREEMPT() preempt_enable()
++#else
++#define DISABLE_PREEMPT()
++#define ENABLE_PREEMPT()
++#endif
++
++#define BEGIN_LOCKS(x) \
++ DISABLE_PREEMPT(); \
++ rcu_read_lock(); \
++ read_lock(&tasklist_lock); \
++ read_lock(&grsec_exec_file_lock); \
++ if (x != GR_DO_AUDIT) \
++ spin_lock(&grsec_alert_lock); \
++ else \
++ spin_lock(&grsec_audit_lock)
++
++#define END_LOCKS(x) \
++ if (x != GR_DO_AUDIT) \
++ spin_unlock(&grsec_alert_lock); \
++ else \
++ spin_unlock(&grsec_audit_lock); \
++ read_unlock(&grsec_exec_file_lock); \
++ read_unlock(&tasklist_lock); \
++ rcu_read_unlock(); \
++ ENABLE_PREEMPT(); \
++ if (x == GR_DONT_AUDIT) \
++ gr_handle_alertkill(current)
++
++enum {
++ FLOODING,
++ NO_FLOODING
++};
++
++extern char *gr_alert_log_fmt;
++extern char *gr_audit_log_fmt;
++extern char *gr_alert_log_buf;
++extern char *gr_audit_log_buf;
++
++static int gr_log_start(int audit)
++{
++ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
++ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
++ unsigned long curr_secs = get_seconds();
++
++ if (audit == GR_DO_AUDIT)
++ goto set_fmt;
++
++ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
++ grsec_alert_wtime = curr_secs;
++ grsec_alert_fyet = 0;
++ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
++ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
++ grsec_alert_fyet++;
++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
++ grsec_alert_wtime = curr_secs;
++ grsec_alert_fyet++;
++ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
++ return FLOODING;
++ }
++ else return FLOODING;
++
++set_fmt:
++#endif
++ memset(buf, 0, PAGE_SIZE);
++ if (current->signal->curr_ip && gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else if (current->signal->curr_ip) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
++ } else if (gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else {
++ sprintf(fmt, "%s%s", loglevel, "grsec: ");
++ strcpy(buf, fmt);
++ }
++
++ return NO_FLOODING;
++}
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++ __attribute__ ((format (printf, 2, 0)));
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++
++ return;
++}
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++ __attribute__ ((format (printf, 2, 3)));
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++ va_list ap;
++
++ va_start(ap, msg);
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++ va_end(ap);
++
++ return;
++}
++
++static void gr_log_end(int audit, int append_default)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++
++ if (append_default) {
++ unsigned int len = strlen(buf);
++ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
++ }
++
++ printk("%s\n", buf);
++
++ return;
++}
++
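gr_log_start() above throttles alerts to CONFIG_GRKERNSEC_FLOODBURST messages per CONFIG_GRKERNSEC_FLOODTIME-second window, emitting one "logging disabled" notice when the burst is exhausted. A standalone model of that window logic (illustrative; the kernel version runs under the alert spinlock and uses get_seconds()):

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10
#define FLOODBURST 6

static unsigned long wtime, fyet;

static int alert_allowed(unsigned long now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;  /* new window, counter reset */
		fyet = 0;
	} else if (now <= wtime + FLOODTIME && fyet < FLOODBURST) {
		fyet++;
	} else if (fyet == FLOODBURST) {
		wtime = now;  /* first suppressed alert restarts the window */
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n",
		       FLOODTIME);
		return 0;
	} else
		return 0;
	return 1;
}

int main(void)
{
	unsigned long t = time(NULL);
	for (int i = 0; i < 10; i++)
		printf("alert %d: %s\n", i,
		       alert_allowed(t) ? "logged" : "dropped");
	return 0;
}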
"successful" : "denied"; ++ char *str1 = NULL, *str2 = NULL, *str3 = NULL; ++ void *voidptr = NULL; ++ int num1 = 0, num2 = 0; ++ unsigned long ulong1 = 0, ulong2 = 0; ++ struct dentry *dentry = NULL; ++ struct vfsmount *mnt = NULL; ++ struct file *file = NULL; ++ struct task_struct *task = NULL; ++ const struct cred *cred, *pcred; ++ va_list ap; ++ ++ BEGIN_LOCKS(audit); ++ logtype = gr_log_start(audit); ++ if (logtype == FLOODING) { ++ END_LOCKS(audit); ++ return; ++ } ++ va_start(ap, argtypes); ++ switch (argtypes) { ++ case GR_TTYSNIFF: ++ task = va_arg(ap, struct task_struct *); ++ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid); ++ break; ++ case GR_SYSCTL_HIDDEN: ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, str1); ++ break; ++ case GR_RBAC: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_RBAC_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1); ++ break; ++ case GR_STR_RBAC: ++ str1 = va_arg(ap, char *); ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_RBAC_MODE2: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ str2 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2); ++ break; ++ case GR_RBAC_MODE3: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ str2 = va_arg(ap, char *); ++ str3 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3); ++ break; ++ case GR_FILENAME: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_STR_FILENAME: ++ str1 = va_arg(ap, char *); ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_FILENAME_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1); ++ break; ++ case GR_FILENAME_TWO_INT: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ num1 = va_arg(ap, int); ++ num2 = va_arg(ap, int); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2); ++ break; ++ case GR_FILENAME_TWO_INT_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ num1 = va_arg(ap, int); ++ num2 = va_arg(ap, int); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1); ++ break; ++ case GR_TEXTREL: ++ file = va_arg(ap, struct file *); ++ ulong1 = va_arg(ap, unsigned long); ++ ulong2 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, file ? 
++ case GR_PTRACE:
++ task = va_arg(ap, struct task_struct *);
++ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
++ break;
++ case GR_RESOURCE:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ str1 = va_arg(ap, char *);
++ ulong2 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CAP:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_SIG:
++ str1 = va_arg(ap, char *);
++ voidptr = va_arg(ap, void *);
++ gr_log_middle_varargs(audit, msg, str1, voidptr);
++ break;
++ case GR_SIG2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ num1 = va_arg(ap, int);
++ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CRASH1:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
++ break;
++ case GR_CRASH2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
++ break;
++ case GR_RWXMAP:
++ file = va_arg(ap, struct file *);
++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
++ break;
++ case GR_PSACCT:
++ {
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ char cur_tty[64] = { 0 };
++ char parent_tty[64] = { 0 };
++
++ task = va_arg(ap, struct task_struct *);
++ wday = va_arg(ap, unsigned int);
++ cday = va_arg(ap, unsigned int);
++ whr = va_arg(ap, int);
++ chr = va_arg(ap, int);
++ wmin = va_arg(ap, int);
++ cmin = va_arg(ap, int);
++ wsec = va_arg(ap, int);
++ csec = va_arg(ap, int);
++ ulong1 = va_arg(ap, unsigned long);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ }
++ break;
++ default:
++ gr_log_middle(audit, msg, ap);
++ }
++ va_end(ap);
++ // these don't need DEFAULTSECARGS printed on the end
++ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
++ gr_log_end(audit, 0);
++ else
++ gr_log_end(audit, 1);
++ END_LOCKS(audit);
++}
+diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
+new file mode 100644
+index 0000000..f536303
+--- /dev/null
++++ b/grsecurity/grsec_mem.c
+@@ -0,0 +1,40 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
++ return;
++}
++
++void
++gr_handle_iopl(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
++ return;
++}
++
++void
++gr_handle_mem_readwrite(u64 from, u64 to)
++{
++ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
++ return;
++}
++
++void
++gr_handle_vm86(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
++ return;
++}
++
++void
++gr_log_badprocpid(const char *entry)
++{
++ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
++ return;
++}
from : "none", to); ++#endif ++ return; ++} ++ ++int ++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} ++ ++int ++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) && ++ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c +new file mode 100644 +index 0000000..a3b12a0 +--- /dev/null ++++ b/grsecurity/grsec_pax.c +@@ -0,0 +1,36 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++#include <linux/grsecurity.h> ++ ++void ++gr_log_textrel(struct vm_area_struct * vma) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL ++ if (grsec_enable_audit_textrel) ++ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff); ++#endif ++ return; ++} ++ ++void ++gr_log_rwxmmap(struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file); ++#endif ++ return; ++} ++ ++void ++gr_log_rwxmprotect(struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file); ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c +new file mode 100644 +index 0000000..78f8733 +--- /dev/null ++++ b/grsecurity/grsec_ptrace.c +@@ -0,0 +1,30 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++#include <linux/security.h> ++ ++void ++gr_audit_ptrace(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ if (grsec_enable_audit_ptrace) ++ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task); ++#endif ++ return; ++} ++ ++int ++gr_ptrace_readexec(struct file *file, int unsafe_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ const struct dentry *dentry = file->f_path.dentry; ++ const struct vfsmount *mnt = file->f_path.mnt; ++ ++ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) && ++ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c +new file mode 100644 +index 0000000..c648492 +--- /dev/null ++++ b/grsecurity/grsec_sig.c +@@ -0,0 +1,206 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/delay.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/hardirq.h> ++ ++char *signames[] = { ++ [SIGSEGV] = "Segmentation fault", ++ [SIGILL] = "Illegal instruction", ++ [SIGABRT] = "Abort", ++ [SIGBUS] = "Invalid alignment/Bus error" ++}; ++ ++void ++gr_log_signal(const int sig, const void *addr, const struct task_struct *t) ++{ ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || ++ (sig == SIGABRT) || (sig == SIGBUS))) { ++ if 
++ if (t->pid == current->pid) {
++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
++ } else {
++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
++ }
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++ /* ignore the 0 signal for protected task checks */
++ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
++ return -EPERM;
++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
++int gr_fake_force_sig(int sig, struct task_struct *t)
++{
++ unsigned long int flags;
++ int ret, blocked, ignored;
++ struct k_sigaction *action;
++
++ spin_lock_irqsave(&t->sighand->siglock, flags);
++ action = &t->sighand->action[sig-1];
++ ignored = action->sa.sa_handler == SIG_IGN;
++ blocked = sigismember(&t->blocked, sig);
++ if (blocked || ignored) {
++ action->sa.sa_handler = SIG_DFL;
++ if (blocked) {
++ sigdelset(&t->blocked, sig);
++ recalc_sigpending_and_wake(t);
++ }
++ }
++ if (action->sa.sa_handler == SIG_DFL)
++ t->signal->flags &= ~SIGNAL_UNKILLABLE;
++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
++
++ spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++ return ret;
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_BRUTE
++#define GR_USER_BAN_TIME (15 * 60)
++
++static int __get_dumpable(unsigned long mm_flags)
++{
++ int ret;
++
++ ret = mm_flags & MMF_DUMPABLE_MASK;
++ return (ret >= 2) ? 2 : ret;
++}
++#endif
++
++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ uid_t uid = 0;
++
++ if (!grsec_enable_brute)
++ return;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
++ p->real_parent->brute = 1;
++ else {
++ const struct cred *cred = __task_cred(p), *cred2;
++ struct task_struct *tsk, *tsk2;
++
++ if (!__get_dumpable(mm_flags) && cred->uid) {
++ struct user_struct *user;
++
++ uid = cred->uid;
++
++ /* this is put upon execution past expiration */
++ user = find_user(uid);
++ if (user == NULL)
++ goto unlock;
++ user->banned = 1;
++ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
++ if (user->ban_expires == ~0UL)
++ user->ban_expires--;
++
++ do_each_thread(tsk2, tsk) {
++ cred2 = __task_cred(tsk);
++ if (tsk != p && cred2->uid == uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ }
++ }
++unlock:
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ if (uid)
++ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
++#endif
++ return;
++}
++
++void gr_handle_brute_check(void)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ if (current->brute)
++ msleep(30 * 1000);
++#endif
++ return;
++}
++
++void gr_handle_kernel_exploit(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++ const struct cred *cred;
++ struct task_struct *tsk, *tsk2;
++ struct user_struct *user;
++ uid_t uid;
++
++ if (in_irq() || in_serving_softirq() || in_nmi())
++ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
++
++ uid = current_uid();
++
++ if (uid == 0)
++ panic("grsec: halting the system due to suspicious kernel crash caused by root");
++ else {
++ /* kill all the processes of this user, hold a reference
++ to their creds struct, and prevent them from creating
++ another process until system reset
++ */
++ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
++ /* we intentionally leak this ref */
++ user = get_uid(current->cred->user);
++ if (user) {
++ user->banned = 1;
++ user->ban_expires = ~0UL;
++ }
++
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ cred = __task_cred(tsk);
++ if (cred->uid == uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ }
++#endif
++}
++
++int __gr_process_user_ban(struct user_struct *user)
++{
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
++ if (unlikely(user->banned)) {
++ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
++ user->banned = 0;
++ user->ban_expires = 0;
++ free_uid(user);
++ } else
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int gr_process_user_ban(void)
++{
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
++ return __gr_process_user_ban(current->cred->user);
++#endif
++ return 0;
++}
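The ban bookkeeping above stores an absolute expiry in seconds, reserving ~0UL for "never expires" (which is why gr_handle_brute_attach() backs a computed ~0UL off by one). A standalone model of __gr_process_user_ban() (illustrative only, not the kernel code):

#include <assert.h>
#include <errno.h>

struct user { int banned; unsigned long ban_expires; };

static int process_user_ban(struct user *u, unsigned long now)
{
	if (u->banned) {
		if (u->ban_expires != ~0UL && now >= u->ban_expires) {
			u->banned = 0;   /* ban lapsed */
			u->ban_expires = 0;
		} else
			return -EPERM;
	}
	return 0;
}

int main(void)
{
	struct user u = { 1, 1000 };        /* banned until t=1000 */
	assert(process_user_ban(&u, 999) == -EPERM);
	assert(process_user_ban(&u, 1000) == 0 && !u.banned);
	u.banned = 1;
	u.ban_expires = ~0UL;               /* kernel-exploit lockout */
	assert(process_user_ban(&u, 1UL << 30) == -EPERM);
	return 0;
}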
+diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
+new file mode 100644
+index 0000000..7512ea9
+--- /dev/null
++++ b/grsecurity/grsec_sock.c
+@@ -0,0 +1,275 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
++EXPORT_SYMBOL(gr_cap_rtnetlink);
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define gr_conn_table_size 32749
++struct conn_table_entry {
++ struct conn_table_entry *next;
++ struct signal_struct *sig;
++};
++
++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
++DEFINE_SPINLOCK(gr_conn_table_lock);
++
++extern const char * gr_socktype_to_name(unsigned char type);
++extern const char * gr_proto_to_name(unsigned char proto);
++extern const char * gr_sockfamily_to_name(unsigned char family);
++
++static __inline__ int
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
++ sig->gr_sport == sport && sig->gr_dport == dport))
++ return 1;
++ else
++ return 0;
++}
++
++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
++{
++ struct conn_table_entry **match;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ newent->sig = sig;
++
++ match = &gr_conn_table[index];
++ newent->next = *match;
++ *match = newent;
++
++ return;
++}
++
++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
++{
++ struct conn_table_entry *match, *last = NULL;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig,
++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
++ sig->gr_dport)) {
++ last = match;
++ match = match->next;
++ }
++
++ if (match) {
++ if (last)
++ last->next = match->next;
++ else
++ gr_conn_table[index] = NULL;
++ kfree(match);
++ }
++
++ return;
++}
++
++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ struct conn_table_entry *match;
++ unsigned int index;
++
++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
++ match = match->next;
++
++ if (match)
++ return match->sig;
++ else
++ return NULL;
++}
++
++#endif
++
++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *sig = task->signal;
++ struct conn_table_entry *newent;
++
++ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
++ if (newent == NULL)
++ return;
++ /* no bh lock needed since we are called with bh disabled */
++ spin_lock(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(sig);
++ sig->gr_saddr = inet->rcv_saddr;
++ sig->gr_daddr = inet->daddr;
++ sig->gr_sport = inet->sport;
++ sig->gr_dport = inet->dport;
++ gr_add_to_task_ip_table_nolock(sig, newent);
++ spin_unlock(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ spin_lock_bh(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(task->signal);
++ spin_unlock_bh(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *p, *set;
++ const struct inet_sock *inet = inet_sk(sk);
++
++ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++ return;
++
++ set = current->signal;
++
++ spin_lock_bh(&gr_conn_table_lock);
++ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
++ inet->dport, inet->sport);
++ if (unlikely(p != NULL)) {
++ set->curr_ip = p->curr_ip;
++ set->used_accept = 1;
++ gr_del_task_from_ip_table_nolock(p);
++ spin_unlock_bh(&gr_conn_table_lock);
++ return;
++ }
++ spin_unlock_bh(&gr_conn_table_lock);
++
++ set->curr_ip = inet->daddr;
++ set->used_accept = 1;
++#endif
++ return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++ (family != AF_UNIX)) {
++ if (family == AF_INET)
++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
++ else
++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server_other(const struct sock *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sk_family != AF_UNIX) &&
++ (sck->sk_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++kernel_cap_t
++gr_cap_rtnetlink(struct sock *sock)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_acl_is_enabled())
++ return current_cap();
++ else if (sock->sk_protocol == NETLINK_ISCSI &&
++ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
++ gr_is_capable(CAP_SYS_ADMIN))
++ return current_cap();
++ else if (sock->sk_protocol == NETLINK_AUDIT &&
++ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
++ gr_is_capable(CAP_AUDIT_WRITE) &&
++ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
++ gr_is_capable(CAP_AUDIT_CONTROL))
++ return current_cap();
++ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
++ ((sock->sk_protocol == NETLINK_ROUTE) ?
++ gr_is_capable_nolog(CAP_NET_ADMIN) :
++ gr_is_capable(CAP_NET_ADMIN)))
++ return current_cap();
++ else
++ return __cap_empty_set;
++#else
++ return current_cap();
++#endif
++}
+diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
+new file mode 100644
+index 0000000..31f3258
+--- /dev/null
++++ b/grsecurity/grsec_sysctl.c
+@@ -0,0 +1,499 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
++ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC_ROFS
++static int __maybe_unused one = 1;
++#endif
++
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
++#ifdef CONFIG_GRKERNSEC_IO
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "disable_priv_io",
++ .data = &grsec_disable_privio,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "linking_restrictions",
++ .data = &grsec_enable_link,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "deter_bruteforce",
++ .data = &grsec_enable_brute,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "fifo_restrictions",
++ .data = &grsec_enable_fifo,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "ptrace_readexec", ++ .data = &grsec_enable_ptrace_readexec, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "consistent_setxid", ++ .data = &grsec_enable_setxid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "ip_blackhole", ++ .data = &grsec_enable_blackhole, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "lastack_retries", ++ .data = &grsec_lastack_retries, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "exec_logging", ++ .data = &grsec_enable_execlog, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "rwxmap_logging", ++ .data = &grsec_enable_log_rwxmaps, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "signal_logging", ++ .data = &grsec_enable_signal, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_FORKFAIL ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "forkfail_logging", ++ .data = &grsec_enable_forkfail, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TIME ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "timechange_logging", ++ .data = &grsec_enable_time, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_shmat", ++ .data = &grsec_enable_chroot_shmat, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_unix", ++ .data = &grsec_enable_chroot_unix, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_mount", ++ .data = &grsec_enable_chroot_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_fchdir", ++ .data = &grsec_enable_chroot_fchdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_chroot", ++ .data = &grsec_enable_chroot_double, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_pivot", ++ .data = &grsec_enable_chroot_pivot, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef 
CONFIG_GRKERNSEC_CHROOT_CHDIR ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_enforce_chdir", ++ .data = &grsec_enable_chroot_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_chmod", ++ .data = &grsec_enable_chroot_chmod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_mknod", ++ .data = &grsec_enable_chroot_mknod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_restrict_nice", ++ .data = &grsec_enable_chroot_nice, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_execlog", ++ .data = &grsec_enable_chroot_execlog, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_caps", ++ .data = &grsec_enable_chroot_caps, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_deny_sysctl", ++ .data = &grsec_enable_chroot_sysctl, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "tpe", ++ .data = &grsec_enable_tpe, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "tpe_gid", ++ .data = &grsec_tpe_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "tpe_invert", ++ .data = &grsec_enable_tpe_invert, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "tpe_restrict_all", ++ .data = &grsec_enable_tpe_all, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "socket_all", ++ .data = &grsec_enable_socket_all, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "socket_all_gid", ++ .data = &grsec_socket_all_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "socket_client", ++ .data = &grsec_enable_socket_client, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "socket_client_gid", ++ .data = &grsec_socket_client_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "socket_server", ++ .data = &grsec_enable_socket_server, ++ .maxlen = 
sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "socket_server_gid", ++ .data = &grsec_socket_server_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "audit_group", ++ .data = &grsec_enable_group, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "audit_gid", ++ .data = &grsec_audit_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "audit_chdir", ++ .data = &grsec_enable_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "audit_mount", ++ .data = &grsec_enable_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "audit_textrel", ++ .data = &grsec_enable_audit_textrel, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_DMESG ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "dmesg", ++ .data = &grsec_enable_dmesg, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "chroot_findtask", ++ .data = &grsec_enable_chroot_findtask, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "resource_logging", ++ .data = &grsec_resource_logging, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "audit_ptrace", ++ .data = &grsec_enable_audit_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "harden_ptrace", ++ .data = &grsec_enable_harden_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "grsec_lock", ++ .data = &grsec_lock, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_ROFS ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "romount_protect", ++ .data = &grsec_enable_rofs, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one, ++ }, ++#endif ++ { .ctl_name = 0 } ++}; ++#endif +diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c +new file mode 100644 +index 0000000..0dc13c3 +--- /dev/null ++++ b/grsecurity/grsec_time.c +@@ -0,0 +1,16 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++#include <linux/module.h> ++ ++void ++gr_log_timechange(void) ++{ ++#ifdef CONFIG_GRKERNSEC_TIME ++ if (grsec_enable_time) ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); ++#endif ++ return; ++} ++ ++EXPORT_SYMBOL(gr_log_timechange); +diff --git 
a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c +new file mode 100644 +index 0000000..07e0dc0 +--- /dev/null ++++ b/grsecurity/grsec_tpe.c +@@ -0,0 +1,73 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/grinternal.h> ++ ++extern int gr_acl_tpe_check(void); ++ ++int ++gr_tpe_allow(const struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct inode *inode = file->f_path.dentry->d_parent->d_inode; ++ const struct cred *cred = current_cred(); ++ char *msg = NULL; ++ char *msg2 = NULL; ++ ++ // never restrict root ++ if (!cred->uid) ++ return 1; ++ ++ if (grsec_enable_tpe) { ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ++ msg = "not being in trusted group"; ++ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)) ++ msg = "being in untrusted group"; ++#else ++ if (in_group_p(grsec_tpe_gid)) ++ msg = "being in untrusted group"; ++#endif ++ } ++ if (!msg && gr_acl_tpe_check()) ++ msg = "being in untrusted role"; ++ ++ // not in any affected group/role ++ if (!msg) ++ goto next_check; ++ ++ if (inode->i_uid) ++ msg2 = "file in non-root-owned directory"; ++ else if (inode->i_mode & S_IWOTH) ++ msg2 = "file in world-writable directory"; ++ else if (inode->i_mode & S_IWGRP) ++ msg2 = "file in group-writable directory"; ++ ++ if (msg && msg2) { ++ char fullmsg[70] = {0}; ++ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2); ++ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++ msg = NULL; ++next_check: ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ if (!grsec_enable_tpe || !grsec_enable_tpe_all) ++ return 1; ++ ++ if (inode->i_uid && (inode->i_uid != cred->uid)) ++ msg = "directory not owned by user"; ++ else if (inode->i_mode & S_IWOTH) ++ msg = "file in world-writable directory"; ++ else if (inode->i_mode & S_IWGRP) ++ msg = "file in group-writable directory"; ++ ++ if (msg) { ++ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++#endif ++#endif ++ return 1; ++} +diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c +new file mode 100644 +index 0000000..9f7b1ac +--- /dev/null ++++ b/grsecurity/grsum.c +@@ -0,0 +1,61 @@ ++#include <linux/err.h> ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/scatterlist.h> ++#include <linux/crypto.h> ++#include <linux/gracl.h> ++ ++ ++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) ++#error "crypto and sha256 must be built into the kernel" ++#endif ++ ++int ++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) ++{ ++ char *p; ++ struct crypto_hash *tfm; ++ struct hash_desc desc; ++ struct scatterlist sg; ++ unsigned char temp_sum[GR_SHA_LEN]; ++ volatile int retval = 0; ++ volatile int dummy = 0; ++ unsigned int i; ++ ++ sg_init_table(&sg, 1); ++ ++ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC); ++ if (IS_ERR(tfm)) { ++ /* should never happen, since sha256 should be built in */ ++ return 1; ++ } ++ ++ desc.tfm = tfm; ++ desc.flags = 0; ++ ++ crypto_hash_init(&desc); ++ ++ p = salt; ++ sg_set_buf(&sg, p, GR_SALT_LEN); ++ crypto_hash_update(&desc, &sg, sg.length); ++ ++ p = entry->pw; ++ sg_set_buf(&sg, p, strlen(p)); ++ ++ crypto_hash_update(&desc, &sg, sg.length); ++ ++ crypto_hash_final(&desc, temp_sum); ++ ++ memset(entry->pw, 0, 
GR_PW_LEN); ++ ++ for (i = 0; i < GR_SHA_LEN; i++) ++ if (sum[i] != temp_sum[i]) ++ retval = 1; ++ else ++ dummy = 1; // waste a cycle ++ ++ crypto_free_hash(tfm); ++ ++ return retval; ++} +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h +index 3cd9ccd..fe16d47 100644 +--- a/include/acpi/acpi_bus.h ++++ b/include/acpi/acpi_bus.h +@@ -107,7 +107,7 @@ struct acpi_device_ops { + acpi_op_bind bind; + acpi_op_unbind unbind; + acpi_op_notify notify; +-}; ++} __no_const; + + #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ + +diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h +index f4906f6..71feb73 100644 +--- a/include/acpi/acpi_drivers.h ++++ b/include/acpi/acpi_drivers.h +@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type); + Dock Station + -------------------------------------------------------------------------- */ + struct acpi_dock_ops { +- acpi_notify_handler handler; +- acpi_notify_handler uevent; ++ const acpi_notify_handler handler; ++ const acpi_notify_handler uevent; + }; + + #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE) +@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle); + extern int register_dock_notifier(struct notifier_block *nb); + extern void unregister_dock_notifier(struct notifier_block *nb); + extern int register_hotplug_dock_device(acpi_handle handle, +- struct acpi_dock_ops *ops, ++ const struct acpi_dock_ops *ops, + void *context); + extern void unregister_hotplug_dock_device(acpi_handle handle); + #else +@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb) + { + } + static inline int register_hotplug_dock_device(acpi_handle handle, +- struct acpi_dock_ops *ops, ++ const struct acpi_dock_ops *ops, + void *context) + { + return -ENODEV; +diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h +index b7babf0..a9ac9fc 100644 +--- a/include/asm-generic/atomic-long.h ++++ b/include/asm-generic/atomic-long.h +@@ -22,6 +22,12 @@ + + typedef atomic64_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic64_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic64_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) + + static inline long atomic_long_read(atomic_long_t *l) +@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l) + return (long)atomic64_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic64_t *v = (atomic64_t *)l; +@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) + atomic64_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l) + atomic64_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_inc_unchecked(v); ++} ++#endif ++ + static 
inline void atomic_long_dec(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l) + atomic64_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) + atomic64_add(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) + return (long)atomic64_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) + + typedef atomic_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) + static inline long atomic_long_read(atomic_long_t *l) + { +@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l) + return (long)atomic_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic_t *v = (atomic_t *)l; +@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) + atomic_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l) + atomic_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_inc_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_dec(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l) + atomic_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) + atomic_add(i, v); + } + 
++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) + return (long)atomic_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) + + #endif /* BITS_PER_LONG == 64 */ + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void pax_refcount_needs_these_functions(void) ++{ ++ atomic_read_unchecked((atomic_unchecked_t *)NULL); ++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0); ++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_inc_unchecked((atomic_unchecked_t *)NULL); ++ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL); ++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL); ++ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_dec_unchecked((atomic_unchecked_t *)NULL); ++ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0); ++ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0); ++ ++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0); ++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL); ++} ++#else ++#define atomic_read_unchecked(v) atomic_read(v) ++#define atomic_set_unchecked(v, i) atomic_set((v), (i)) ++#define atomic_add_unchecked(i, v) atomic_add((i), (v)) ++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v)) ++#define atomic_inc_unchecked(v) atomic_inc(v) ++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v) ++#define atomic_inc_return_unchecked(v) atomic_inc_return(v) ++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v)) ++#define atomic_dec_unchecked(v) atomic_dec(v) ++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n)) ++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i)) ++ ++#define atomic_long_read_unchecked(v) atomic_long_read(v) ++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i)) ++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v)) ++#define atomic_long_inc_unchecked(v) atomic_long_inc(v) ++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v) ++#define atomic_long_dec_unchecked(v) atomic_long_dec(v) ++#endif ++ + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ +diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h +index b18ce4f..2ee2843 100644 +--- a/include/asm-generic/atomic64.h ++++ b/include/asm-generic/atomic64.h +@@ -16,6 +16,8 @@ typedef struct { + long long counter; + } atomic64_t; + ++typedef atomic64_t atomic64_unchecked_t; ++ + #define ATOMIC64_INIT(i) { (i) } + + extern long long atomic64_read(const atomic64_t *v); +@@ 
-39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u); + #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* _ASM_GENERIC_ATOMIC64_H */ +diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h +index d48ddf0..656a0ac 100644 +--- a/include/asm-generic/bug.h ++++ b/include/asm-generic/bug.h +@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line); + + #else /* !CONFIG_BUG */ + #ifndef HAVE_ARCH_BUG +-#define BUG() do {} while(0) ++#define BUG() do { for (;;) ; } while(0) + #endif + + #ifndef HAVE_ARCH_BUG_ON +-#define BUG_ON(condition) do { if (condition) ; } while(0) ++#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0) + #endif + + #ifndef HAVE_ARCH_WARN_ON +diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h +index 1bfcfe5..e04c5c9 100644 +--- a/include/asm-generic/cache.h ++++ b/include/asm-generic/cache.h +@@ -6,7 +6,7 @@ + * cache lines need to provide their own cache.h. + */ + +-#define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_SHIFT 5UL ++#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT) + + #endif /* __ASM_GENERIC_CACHE_H */ +diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h +index 6920695..41038bc 100644 +--- a/include/asm-generic/dma-mapping-common.h ++++ b/include/asm-generic/dma-mapping-common.h +@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, + enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(ptr, size); +@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, + enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) +@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + int i, ents; + struct scatterlist *s; + +@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); +@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir) + 
{ +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(page_address(page) + offset, size); +@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) +@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) +@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) +@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, + size_t size, + enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_range_for_cpu) { +@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, + size_t size, + enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_range_for_device) { +@@ -155,7 +155,7 @@ static inline void + dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_cpu) +@@ -167,7 +167,7 @@ static inline void + dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_device) +diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h +index 0d68a1e..b74a761 100644 +--- a/include/asm-generic/emergency-restart.h ++++ b/include/asm-generic/emergency-restart.h +@@ -1,7 +1,7 @@ + #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H + #define _ASM_GENERIC_EMERGENCY_RESTART_H + +-static inline void machine_emergency_restart(void) ++static inline __noreturn void machine_emergency_restart(void) + { + machine_restart(NULL); + } +diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h +index 3c2344f..4590a7d 100644 +--- a/include/asm-generic/futex.h ++++ b/include/asm-generic/futex.h +@@ -6,7 +6,7 @@ + #include <asm/errno.h> + + static inline int +-futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ++futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) + { + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; +@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) + } + + static inline int 
+-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) ++futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval) + { + return -ENOSYS; + } +diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h +index e5f234a..cdb16b3 100644 +--- a/include/asm-generic/kmap_types.h ++++ b/include/asm-generic/kmap_types.h +@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY, + KMAP_D(16) KM_IRQ_PTE, + KMAP_D(17) KM_NMI, + KMAP_D(18) KM_NMI_PTE, +-KMAP_D(19) KM_TYPE_NR ++KMAP_D(19) KM_CLEARPAGE, ++KMAP_D(20) KM_TYPE_NR + }; + + #undef KMAP_D +diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h +index fc21844..2ee9629 100644 +--- a/include/asm-generic/local.h ++++ b/include/asm-generic/local.h +@@ -39,6 +39,7 @@ typedef struct + #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) + #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) + #define local_inc_return(l) atomic_long_inc_return(&(l)->a) ++#define local_dec_return(l) atomic_long_dec_return(&(l)->a) + + #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) + #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) +diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h +index 725612b..9cc513a 100644 +--- a/include/asm-generic/pgtable-nopmd.h ++++ b/include/asm-generic/pgtable-nopmd.h +@@ -1,14 +1,19 @@ + #ifndef _PGTABLE_NOPMD_H + #define _PGTABLE_NOPMD_H + +-#ifndef __ASSEMBLY__ +- + #include <asm-generic/pgtable-nopud.h> + +-struct mm_struct; +- + #define __PAGETABLE_PMD_FOLDED + ++#define PMD_SHIFT PUD_SHIFT ++#define PTRS_PER_PMD 1 ++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) ++#define PMD_MASK (~(PMD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ ++struct mm_struct; ++ + /* + * Having the pmd type consist of a pud gets the size right, and allows + * us to conceptually access the pud entry that this pmd is folded into +@@ -16,11 +21,6 @@ struct mm_struct; + */ + typedef struct { pud_t pud; } pmd_t; + +-#define PMD_SHIFT PUD_SHIFT +-#define PTRS_PER_PMD 1 +-#define PMD_SIZE (1UL << PMD_SHIFT) +-#define PMD_MASK (~(PMD_SIZE-1)) +- + /* + * The "pud_xxx()" functions here are trivial for a folded two-level + * setup: the pmd is never bad, and a pmd always exists (as it's folded +diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h +index 810431d..0ec4804f 100644 +--- a/include/asm-generic/pgtable-nopud.h ++++ b/include/asm-generic/pgtable-nopud.h +@@ -1,10 +1,15 @@ + #ifndef _PGTABLE_NOPUD_H + #define _PGTABLE_NOPUD_H + +-#ifndef __ASSEMBLY__ +- + #define __PAGETABLE_PUD_FOLDED + ++#define PUD_SHIFT PGDIR_SHIFT ++#define PTRS_PER_PUD 1 ++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) ++#define PUD_MASK (~(PUD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ + /* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into +@@ -12,11 +17,6 @@ + */ + typedef struct { pgd_t pgd; } pud_t; + +-#define PUD_SHIFT PGDIR_SHIFT +-#define PTRS_PER_PUD 1 +-#define PUD_SIZE (1UL << PUD_SHIFT) +-#define PUD_MASK (~(PUD_SIZE-1)) +- + /* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded +@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { } + #define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) + + #define pgd_populate(mm, pgd, pud) do { } while (0) ++#define pgd_populate_kernel(mm, pgd, pud) do { } while (0) + /* + * (puds 
are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index e2bd73e..fea8ed3 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size); + #endif + ++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL ++static inline unsigned long pax_open_kernel(void) { return 0; } ++#endif ++ ++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_GENERIC_PGTABLE_H */ +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index b6e818f..21aa58a 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -199,6 +199,7 @@ + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_rodata) = .; \ + *(.rodata) *(.rodata.*) \ ++ *(.data.read_only) \ + *(__vermagic) /* Kernel version magic */ \ + *(__markers_strings) /* Markers: strings */ \ + *(__tracepoints_strings)/* Tracepoints: strings */ \ +@@ -656,22 +657,24 @@ + * section in the linker script will go there too. @phdr should have + * a leading colon. + * +- * Note that this macros defines __per_cpu_load as an absolute symbol. ++ * Note that this macros defines per_cpu_load as an absolute symbol. + * If there is no need to put the percpu section at a predetermined + * address, use PERCPU(). + */ + #define PERCPU_VADDR(vaddr, phdr) \ +- VMLINUX_SYMBOL(__per_cpu_load) = .; \ +- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ ++ per_cpu_load = .; \ ++ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \ + - LOAD_OFFSET) { \ ++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \ + VMLINUX_SYMBOL(__per_cpu_start) = .; \ + *(.data.percpu.first) \ +- *(.data.percpu.page_aligned) \ + *(.data.percpu) \ ++ . = ALIGN(PAGE_SIZE); \ ++ *(.data.percpu.page_aligned) \ + *(.data.percpu.shared_aligned) \ + VMLINUX_SYMBOL(__per_cpu_end) = .; \ + } phdr \ +- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu); ++ . 
= VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu); + + /** + * PERCPU - define output section for percpu area, simple version +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index ebab6a6..351dba1 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -71,6 +71,7 @@ + #include <linux/workqueue.h> + #include <linux/poll.h> + #include <asm/pgalloc.h> ++#include <asm/local.h> + #include "drm.h" + + #include <linux/idr.h> +@@ -814,7 +815,7 @@ struct drm_driver { + void (*vgaarb_irq)(struct drm_device *dev, bool state); + + /* Driver private ops for this object */ +- struct vm_operations_struct *gem_vm_ops; ++ const struct vm_operations_struct *gem_vm_ops; + + int major; + int minor; +@@ -917,7 +918,7 @@ struct drm_device { + + /** \name Usage Counters */ + /*@{ */ +- int open_count; /**< Outstanding files open */ ++ local_t open_count; /**< Outstanding files open */ + atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ + atomic_t vma_count; /**< Outstanding vma areas open */ + int buf_use; /**< Buffers in use -- cannot alloc */ +@@ -928,7 +929,7 @@ struct drm_device { + /*@{ */ + unsigned long counters; + enum drm_stat_type types[15]; +- atomic_t counts[15]; ++ atomic_unchecked_t counts[15]; + /*@} */ + + struct list_head filelist; +@@ -1016,7 +1017,7 @@ struct drm_device { + struct pci_controller *hose; + #endif + struct drm_sg_mem *sg; /**< Scatter gather memory */ +- unsigned int num_crtcs; /**< Number of CRTCs on this device */ ++ unsigned int num_crtcs; /**< Number of CRTCs on this device */ + void *dev_private; /**< device private data */ + void *mm_private; + struct address_space *dev_mapping; +@@ -1042,11 +1043,11 @@ struct drm_device { + spinlock_t object_name_lock; + struct idr object_name_idr; + atomic_t object_count; +- atomic_t object_memory; ++ atomic_unchecked_t object_memory; + atomic_t pin_count; +- atomic_t pin_memory; ++ atomic_unchecked_t pin_memory; + atomic_t gtt_count; +- atomic_t gtt_memory; ++ atomic_unchecked_t gtt_memory; + uint32_t gtt_total; + uint32_t invalidate_domains; /* domains pending invalidation */ + uint32_t flush_domains; /* domains pending flush */ +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +index b29e201..3413cc9 100644 +--- a/include/drm/drm_crtc_helper.h ++++ b/include/drm/drm_crtc_helper.h +@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs { + + /* reload the current crtc LUT */ + void (*load_lut)(struct drm_crtc *crtc); +-}; ++} __no_const; + + struct drm_encoder_helper_funcs { + void (*dpms)(struct drm_encoder *encoder, int mode); +@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs { + struct drm_connector *connector); + /* disable encoder when not in use - more explicit than dpms off */ + void (*disable)(struct drm_encoder *encoder); +-}; ++} __no_const; + + struct drm_connector_helper_funcs { + int (*get_modes)(struct drm_connector *connector); +diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h +index b199170..6f9e64c 100644 +--- a/include/drm/ttm/ttm_memory.h ++++ b/include/drm/ttm/ttm_memory.h +@@ -47,7 +47,7 @@ + + struct ttm_mem_shrink { + int (*do_shrink) (struct ttm_mem_shrink *); +-}; ++} __no_const; + + /** + * struct ttm_mem_global - Global memory accounting structure. 
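The drm and ttm hunks above show the patch's constification pattern in miniature: tables of function pointers are either const-qualified outright, so they are emitted into read-only memory, or tagged __no_const when the kernel legitimately rewrites them at runtime (the attribute itself is wired up under CONSTIFY_PLUGIN later, in the compiler-gcc4.h hunk). Below is a minimal stand-alone C sketch of the underlying idea; it is illustrative only, not part of the patch, and the names (example_ops, safe_handler) are invented:

/*
 * Illustrative sketch, not from the patch: a const ops table cannot
 * have its function pointers retargeted, because the object lives in
 * .rodata and the compiler rejects writes to it.
 */
#include <stdio.h>

struct example_ops {
	void (*handler)(void);
};

static void safe_handler(void)
{
	puts("handler called");
}

/* const: the table is placed in read-only memory */
static const struct example_ops ops = { .handler = safe_handler };

int main(void)
{
	ops.handler();            /* indirect calls work as before */
	/* ops.handler = NULL; */ /* rejected at compile time; at
	                             runtime .rodata is mapped R/O  */
	return 0;
}

Structures that drivers must fill in or swap at runtime are the ones the patch tags __no_const instead, telling the constify plugin to leave them writable.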
+diff --git a/include/linux/a.out.h b/include/linux/a.out.h +index e86dfca..40cc55f 100644 +--- a/include/linux/a.out.h ++++ b/include/linux/a.out.h +@@ -39,6 +39,14 @@ enum machine_type { + M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ + }; + ++/* Constants for the N_FLAGS field */ ++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ ++ + #if !defined (N_MAGIC) + #define N_MAGIC(exec) ((exec).a_info & 0xffff) + #endif +diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h +index 817b237..62c10bc 100644 +--- a/include/linux/atmdev.h ++++ b/include/linux/atmdev.h +@@ -237,7 +237,7 @@ struct compat_atm_iobuf { + #endif + + struct k_atm_aal_stats { +-#define __HANDLE_ITEM(i) atomic_t i ++#define __HANDLE_ITEM(i) atomic_unchecked_t i + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + }; +diff --git a/include/linux/backlight.h b/include/linux/backlight.h +index 0f5f578..8c4f884 100644 +--- a/include/linux/backlight.h ++++ b/include/linux/backlight.h +@@ -36,18 +36,18 @@ struct backlight_device; + struct fb_info; + + struct backlight_ops { +- unsigned int options; ++ const unsigned int options; + + #define BL_CORE_SUSPENDRESUME (1 << 0) + + /* Notify the backlight driver some property has changed */ +- int (*update_status)(struct backlight_device *); ++ int (* const update_status)(struct backlight_device *); + /* Return the current backlight brightness (accounting for power, + fb_blank etc.) */ +- int (*get_brightness)(struct backlight_device *); ++ int (* const get_brightness)(struct backlight_device *); + /* Check if given framebuffer device is the one bound to this backlight; + return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ +- int (*check_fb)(struct fb_info *); ++ int (* const check_fb)(struct fb_info *); + }; + + /* This structure defines all the properties of a backlight */ +@@ -86,7 +86,7 @@ struct backlight_device { + registered this device has been unloaded, and if class_get_devdata() + points to something in the body of that driver, it is also invalid. 
*/ + struct mutex ops_lock; +- struct backlight_ops *ops; ++ const struct backlight_ops *ops; + + /* The framebuffer notifier block */ + struct notifier_block fb_notif; +@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd) + } + + extern struct backlight_device *backlight_device_register(const char *name, +- struct device *dev, void *devdata, struct backlight_ops *ops); ++ struct device *dev, void *devdata, const struct backlight_ops *ops); + extern void backlight_device_unregister(struct backlight_device *bd); + extern void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h +index a3d802e..93a2ef4 100644 +--- a/include/linux/binfmts.h ++++ b/include/linux/binfmts.h +@@ -18,7 +18,7 @@ struct pt_regs; + #define BINPRM_BUF_SIZE 128 + + #ifdef __KERNEL__ +-#include <linux/list.h> ++#include <linux/sched.h> + + #define CORENAME_MAX_SIZE 128 + +@@ -58,6 +58,7 @@ struct linux_binprm{ + unsigned interp_flags; + unsigned interp_data; + unsigned long loader, exec; ++ char tcomm[TASK_COMM_LEN]; + }; + + extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages); +@@ -83,6 +84,7 @@ struct linux_binfmt { + int (*load_binary)(struct linux_binprm *, struct pt_regs * regs); + int (*load_shlib)(struct file *); + int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); ++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags); + unsigned long min_coredump; /* minimal dump size */ + int hasvdso; + }; +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 5eb6cb0..a2906d2 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -1281,7 +1281,7 @@ struct block_device_operations { + int (*revalidate_disk) (struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + struct module *owner; +-}; ++} __do_const; + + extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h +index 3b73b99..629d21b 100644 +--- a/include/linux/blktrace_api.h ++++ b/include/linux/blktrace_api.h +@@ -160,7 +160,7 @@ struct blk_trace { + struct dentry *dir; + struct dentry *dropped_file; + struct dentry *msg_file; +- atomic_t dropped; ++ atomic_unchecked_t dropped; + }; + + extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); +diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h +index 83195fb..0b0f77d 100644 +--- a/include/linux/byteorder/little_endian.h ++++ b/include/linux/byteorder/little_endian.h +@@ -42,51 +42,51 @@ + + static inline __le64 __cpu_to_le64p(const __u64 *p) + { +- return (__force __le64)*p; ++ return (__force const __le64)*p; + } + static inline __u64 __le64_to_cpup(const __le64 *p) + { +- return (__force __u64)*p; ++ return (__force const __u64)*p; + } + static inline __le32 __cpu_to_le32p(const __u32 *p) + { +- return (__force __le32)*p; ++ return (__force const __le32)*p; + } + static inline __u32 __le32_to_cpup(const __le32 *p) + { +- return (__force __u32)*p; ++ return (__force const __u32)*p; + } + static inline __le16 __cpu_to_le16p(const __u16 *p) + { +- return (__force __le16)*p; ++ return (__force const __le16)*p; + } + static inline __u16 __le16_to_cpup(const __le16 *p) + { +- return (__force __u16)*p; ++ return (__force const __u16)*p; + } + static inline __be64 
__cpu_to_be64p(const __u64 *p) + { +- return (__force __be64)__swab64p(p); ++ return (__force const __be64)__swab64p(p); + } + static inline __u64 __be64_to_cpup(const __be64 *p) + { +- return __swab64p((__u64 *)p); ++ return __swab64p((const __u64 *)p); + } + static inline __be32 __cpu_to_be32p(const __u32 *p) + { +- return (__force __be32)__swab32p(p); ++ return (__force const __be32)__swab32p(p); + } + static inline __u32 __be32_to_cpup(const __be32 *p) + { +- return __swab32p((__u32 *)p); ++ return __swab32p((const __u32 *)p); + } + static inline __be16 __cpu_to_be16p(const __u16 *p) + { +- return (__force __be16)__swab16p(p); ++ return (__force const __be16)__swab16p(p); + } + static inline __u16 __be16_to_cpup(const __be16 *p) + { +- return __swab16p((__u16 *)p); ++ return __swab16p((const __u16 *)p); + } + #define __cpu_to_le64s(x) do { (void)(x); } while (0) + #define __le64_to_cpus(x) do { (void)(x); } while (0) +diff --git a/include/linux/cache.h b/include/linux/cache.h +index 97e2488..e7576b9 100644 +--- a/include/linux/cache.h ++++ b/include/linux/cache.h +@@ -16,6 +16,10 @@ + #define __read_mostly + #endif + ++#ifndef __read_only ++#define __read_only __read_mostly ++#endif ++ + #ifndef ____cacheline_aligned + #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) + #endif +diff --git a/include/linux/capability.h b/include/linux/capability.h +index c8f2a5f7..1618a5c 100644 +--- a/include/linux/capability.h ++++ b/include/linux/capability.h +@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set; + (security_real_capable_noaudit((t), (cap)) == 0) + + extern int capable(int cap); ++int capable_nolog(int cap); + + /* audit system wants to get cap info from files as well */ + struct dentry; +diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h +index 450fa59..246fa19 100644 +--- a/include/linux/compiler-gcc4.h ++++ b/include/linux/compiler-gcc4.h +@@ -14,6 +14,9 @@ + #define __compiler_offsetof(a,b) __builtin_offsetof(a,b) + #define __always_inline inline __attribute__((always_inline)) + ++#ifdef SIZE_OVERFLOW_PLUGIN ++#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__))) ++#endif + /* + * A trick to suppress uninitialized variable warning without generating any + * code +@@ -36,4 +39,16 @@ + the kernel context */ + #define __cold __attribute__((__cold__)) + ++#define __alloc_size(...) 
__attribute((alloc_size(__VA_ARGS__))) ++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg)) ++#define __bos0(ptr) __bos((ptr), 0) ++#define __bos1(ptr) __bos((ptr), 1) ++ ++#if __GNUC_MINOR__ >= 5 ++#ifdef CONSTIFY_PLUGIN ++#define __no_const __attribute__((no_const)) ++#define __do_const __attribute__((do_const)) ++#endif ++#endif ++ + #endif +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 04fb513..6189f3b 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -5,11 +5,14 @@ + + #ifdef __CHECKER__ + # define __user __attribute__((noderef, address_space(1))) ++# define __force_user __force __user + # define __kernel /* default address space */ ++# define __force_kernel __force __kernel + # define __safe __attribute__((safe)) + # define __force __attribute__((force)) + # define __nocast __attribute__((nocast)) + # define __iomem __attribute__((noderef, address_space(2))) ++# define __force_iomem __force __iomem + # define __acquires(x) __attribute__((context(x,0,1))) + # define __releases(x) __attribute__((context(x,1,0))) + # define __acquire(x) __context__(x,1) +@@ -17,13 +20,34 @@ + # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) + extern void __chk_user_ptr(const volatile void __user *); + extern void __chk_io_ptr(const volatile void __iomem *); ++#elif defined(CHECKER_PLUGIN) ++//# define __user ++//# define __force_user ++//# define __kernel ++//# define __force_kernel ++# define __safe ++# define __force ++# define __nocast ++# define __iomem ++# define __force_iomem ++# define __chk_user_ptr(x) (void)0 ++# define __chk_io_ptr(x) (void)0 ++# define __builtin_warning(x, y...) (1) ++# define __acquires(x) ++# define __releases(x) ++# define __acquire(x) (void)0 ++# define __release(x) (void)0 ++# define __cond_lock(x,c) (c) + #else + # define __user ++# define __force_user + # define __kernel ++# define __force_kernel + # define __safe + # define __force + # define __nocast + # define __iomem ++# define __force_iomem + # define __chk_user_ptr(x) (void)0 + # define __chk_io_ptr(x) (void)0 + # define __builtin_warning(x, y...) (1) +@@ -247,6 +271,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + # define __attribute_const__ /* unimplemented */ + #endif + ++#ifndef __no_const ++# define __no_const ++#endif ++ ++#ifndef __do_const ++# define __do_const ++#endif ++ ++#ifndef __size_overflow ++# define __size_overflow(...) ++#endif + /* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. +@@ -256,6 +291,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + #define __cold + #endif + ++#ifndef __alloc_size ++#define __alloc_size(...) ++#endif ++ ++#ifndef __bos ++#define __bos(ptr, arg) ++#endif ++ ++#ifndef __bos0 ++#define __bos0(ptr) ++#endif ++ ++#ifndef __bos1 ++#define __bos1(ptr) ++#endif ++ + /* Simple shorthand for a section definition */ + #ifndef __section + # define __section(S) __attribute__ ((__section__(#S))) +@@ -278,6 +329,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + * use is to mediate communication between process-level code and irq/NMI + * handlers, all running on the same CPU. 
+ */ +-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x)) ++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x)) + + #endif /* __LINUX_COMPILER_H */ +diff --git a/include/linux/crypto.h b/include/linux/crypto.h +index fd92988..a3164bd 100644 +--- a/include/linux/crypto.h ++++ b/include/linux/crypto.h +@@ -394,7 +394,7 @@ struct cipher_tfm { + const u8 *key, unsigned int keylen); + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +-}; ++} __no_const; + + struct hash_tfm { + int (*init)(struct hash_desc *desc); +@@ -415,13 +415,13 @@ struct compress_tfm { + int (*cot_decompress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +-}; ++} __no_const; + + struct rng_tfm { + int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); +-}; ++} __no_const; + + #define crt_ablkcipher crt_u.ablkcipher + #define crt_aead crt_u.aead +diff --git a/include/linux/dcache.h b/include/linux/dcache.h +index 30b93b2..cd7a8db 100644 +--- a/include/linux/dcache.h ++++ b/include/linux/dcache.h +@@ -119,6 +119,8 @@ struct dentry { + unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ + }; + ++#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) ++ + /* + * dentry->d_lock spinlock nesting subclasses: + * +diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h +index 3e9bd6a..f4e1aa0 100644 +--- a/include/linux/decompress/mm.h ++++ b/include/linux/decompress/mm.h +@@ -78,7 +78,7 @@ static void free(void *where) + * warnings when not needed (indeed large_malloc / large_free are not + * needed by inflate */ + +-#define malloc(a) kmalloc(a, GFP_KERNEL) ++#define malloc(a) kmalloc((a), GFP_KERNEL) + #define free(a) kfree(a) + + #define large_malloc(a) vmalloc(a) +diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h +index 91b7618..92a93d32 100644 +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -16,51 +16,51 @@ enum dma_data_direction { + }; + + struct dma_map_ops { +- void* (*alloc_coherent)(struct device *dev, size_t size, ++ void* (* const alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +- void (*free_coherent)(struct device *dev, size_t size, ++ void (* const free_coherent)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); +- dma_addr_t (*map_page)(struct device *dev, struct page *page, ++ dma_addr_t (* const map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs); +- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, ++ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs); +- int (*map_sg)(struct device *dev, struct scatterlist *sg, ++ int (* const map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs); +- void (*unmap_sg)(struct device *dev, ++ void (* const unmap_sg)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + struct dma_attrs *attrs); +- void (*sync_single_for_cpu)(struct device *dev, ++ void (* const sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, 
size_t size, + enum dma_data_direction dir); +- void (*sync_single_for_device)(struct device *dev, ++ void (* const sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); +- void (*sync_single_range_for_cpu)(struct device *dev, ++ void (* const sync_single_range_for_cpu)(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + enum dma_data_direction dir); +- void (*sync_single_range_for_device)(struct device *dev, ++ void (* const sync_single_range_for_device)(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + enum dma_data_direction dir); +- void (*sync_sg_for_cpu)(struct device *dev, ++ void (* const sync_sg_for_cpu)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); +- void (*sync_sg_for_device)(struct device *dev, ++ void (* const sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); +- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); +- int (*dma_supported)(struct device *dev, u64 mask); ++ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr); ++ int (* const dma_supported)(struct device *dev, u64 mask); + int (*set_dma_mask)(struct device *dev, u64 mask); + int is_phys; +-}; ++} __do_const; + + #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) + +diff --git a/include/linux/dst.h b/include/linux/dst.h +index e26fed8..b976d9f 100644 +--- a/include/linux/dst.h ++++ b/include/linux/dst.h +@@ -380,7 +380,7 @@ struct dst_node + struct thread_pool *pool; + + /* Transaction IDs live here */ +- atomic_long_t gen; ++ atomic_long_unchecked_t gen; + + /* + * How frequently and how many times transaction +diff --git a/include/linux/elf.h b/include/linux/elf.h +index 90a4ed0..d652617 100644 +--- a/include/linux/elf.h ++++ b/include/linux/elf.h +@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword; + #define PT_GNU_EH_FRAME 0x6474e550 + + #define PT_GNU_STACK (PT_LOOS + 0x474e551) ++#define PT_GNU_RELRO (PT_LOOS + 0x474e552) ++ ++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580) ++ ++/* Constants for the e_flags field */ ++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ + + /* These constants define the different elf file types */ + #define ET_NONE 0 +@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword; + #define DT_DEBUG 21 + #define DT_TEXTREL 22 + #define DT_JMPREL 23 ++#define DT_FLAGS 30 ++ #define DF_TEXTREL 0x00000004 + #define DT_ENCODING 32 + #define OLD_DT_LOOS 0x60000000 + #define DT_LOOS 0x6000000d +@@ -230,6 +243,19 @@ typedef struct elf64_hdr { + #define PF_W 0x2 + #define PF_X 0x1 + ++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */ ++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */ ++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */ ++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */ ++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */ ++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */ ++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */ ++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */ ++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */ ++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP 
*/ ++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */ ++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */ ++ + typedef struct elf32_phdr{ + Elf32_Word p_type; + Elf32_Off p_offset; +@@ -322,6 +348,8 @@ typedef struct elf64_shdr { + #define EI_OSABI 7 + #define EI_PAD 8 + ++#define EI_PAX 14 ++ + #define ELFMAG0 0x7f /* EI_MAG */ + #define ELFMAG1 'E' + #define ELFMAG2 'L' +@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC []; + #define elf_phdr elf32_phdr + #define elf_note elf32_note + #define elf_addr_t Elf32_Off ++#define elf_dyn Elf32_Dyn + + #else + +@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC []; + #define elf_phdr elf64_phdr + #define elf_note elf64_note + #define elf_addr_t Elf64_Off ++#define elf_dyn Elf64_Dyn + + #endif + +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 1b9a47a..6fe2934 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); + + struct address_space_operations { +- int (*writepage)(struct page *page, struct writeback_control *wbc); +- int (*readpage)(struct file *, struct page *); +- void (*sync_page)(struct page *); ++ int (* const writepage)(struct page *page, struct writeback_control *wbc); ++ int (* const readpage)(struct file *, struct page *); ++ void (* const sync_page)(struct page *); + + /* Write back some dirty pages from this mapping. */ +- int (*writepages)(struct address_space *, struct writeback_control *); ++ int (* const writepages)(struct address_space *, struct writeback_control *); + + /* Set a page dirty. Return true if this dirtied it */ +- int (*set_page_dirty)(struct page *page); ++ int (* const set_page_dirty)(struct page *page); + +- int (*readpages)(struct file *filp, struct address_space *mapping, ++ int (* const readpages)(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages); + +- int (*write_begin)(struct file *, struct address_space *mapping, ++ int (* const write_begin)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +- int (*write_end)(struct file *, struct address_space *mapping, ++ int (* const write_end)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + + /* Unfortunately this kludge is needed for FIBMAP. 
Don't use it */ +- sector_t (*bmap)(struct address_space *, sector_t); +- void (*invalidatepage) (struct page *, unsigned long); +- int (*releasepage) (struct page *, gfp_t); +- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, ++ sector_t (* const bmap)(struct address_space *, sector_t); ++ void (* const invalidatepage) (struct page *, unsigned long); ++ int (* const releasepage) (struct page *, gfp_t); ++ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov, + loff_t offset, unsigned long nr_segs); +- int (*get_xip_mem)(struct address_space *, pgoff_t, int, ++ int (* const get_xip_mem)(struct address_space *, pgoff_t, int, + void **, unsigned long *); + /* migrate the contents of a page to the specified target */ +- int (*migratepage) (struct address_space *, ++ int (* const migratepage) (struct address_space *, + struct page *, struct page *); +- int (*launder_page) (struct page *); +- int (*is_partially_uptodate) (struct page *, read_descriptor_t *, ++ int (* const launder_page) (struct page *); ++ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *, + unsigned long); +- int (*error_remove_page)(struct address_space *, struct page *); ++ int (* const error_remove_page)(struct address_space *, struct page *); + }; + + /* +@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp) + typedef struct files_struct *fl_owner_t; + + struct file_lock_operations { +- void (*fl_copy_lock)(struct file_lock *, struct file_lock *); +- void (*fl_release_private)(struct file_lock *); ++ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *); ++ void (* const fl_release_private)(struct file_lock *); + }; + + struct lock_manager_operations { +- int (*fl_compare_owner)(struct file_lock *, struct file_lock *); +- void (*fl_notify)(struct file_lock *); /* unblock callback */ +- int (*fl_grant)(struct file_lock *, struct file_lock *, int); +- void (*fl_copy_lock)(struct file_lock *, struct file_lock *); +- void (*fl_release_private)(struct file_lock *); +- void (*fl_break)(struct file_lock *); +- int (*fl_mylease)(struct file_lock *, struct file_lock *); +- int (*fl_change)(struct file_lock **, int); ++ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *); ++ void (* const fl_notify)(struct file_lock *); /* unblock callback */ ++ int (* const fl_grant)(struct file_lock *, struct file_lock *, int); ++ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *); ++ void (* const fl_release_private)(struct file_lock *); ++ void (* const fl_break)(struct file_lock *); ++ int (* const fl_mylease)(struct file_lock *, struct file_lock *); ++ int (* const fl_change)(struct file_lock **, int); + }; + + struct lock_manager { +@@ -1442,7 +1442,7 @@ struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ +- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent ++ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent + * array */ + }; + int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, +@@ -1512,7 +1512,8 @@ struct file_operations { + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*setlease)(struct file *, long, struct file_lock **); +-}; 
++} __do_const; ++typedef struct file_operations __no_const file_operations_no_const; + + struct inode_operations { + int (*create) (struct inode *,struct dentry *,int, struct nameidata *); +@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *, + unsigned long, loff_t *); + + struct super_operations { +- struct inode *(*alloc_inode)(struct super_block *sb); +- void (*destroy_inode)(struct inode *); ++ struct inode *(* const alloc_inode)(struct super_block *sb); ++ void (* const destroy_inode)(struct inode *); + +- void (*dirty_inode) (struct inode *); +- int (*write_inode) (struct inode *, int); +- void (*drop_inode) (struct inode *); +- void (*delete_inode) (struct inode *); +- void (*put_super) (struct super_block *); +- void (*write_super) (struct super_block *); +- int (*sync_fs)(struct super_block *sb, int wait); +- int (*freeze_fs) (struct super_block *); +- int (*unfreeze_fs) (struct super_block *); +- int (*statfs) (struct dentry *, struct kstatfs *); +- int (*remount_fs) (struct super_block *, int *, char *); +- void (*clear_inode) (struct inode *); +- void (*umount_begin) (struct super_block *); ++ void (* const dirty_inode) (struct inode *); ++ int (* const write_inode) (struct inode *, int); ++ void (* const drop_inode) (struct inode *); ++ void (* const delete_inode) (struct inode *); ++ void (* const put_super) (struct super_block *); ++ void (* const write_super) (struct super_block *); ++ int (* const sync_fs)(struct super_block *sb, int wait); ++ int (* const freeze_fs) (struct super_block *); ++ int (* const unfreeze_fs) (struct super_block *); ++ int (* const statfs) (struct dentry *, struct kstatfs *); ++ int (* const remount_fs) (struct super_block *, int *, char *); ++ void (* const clear_inode) (struct inode *); ++ void (* const umount_begin) (struct super_block *); + +- int (*show_options)(struct seq_file *, struct vfsmount *); +- int (*show_stats)(struct seq_file *, struct vfsmount *); ++ int (* const show_options)(struct seq_file *, struct vfsmount *); ++ int (* const show_stats)(struct seq_file *, struct vfsmount *); + #ifdef CONFIG_QUOTA +- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); +- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); ++ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t); ++ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t); + #endif +- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); ++ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); + }; + + /* +diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h +index 78a05bf..2a7d3e1 100644 +--- a/include/linux/fs_struct.h ++++ b/include/linux/fs_struct.h +@@ -4,7 +4,7 @@ + #include <linux/path.h> + + struct fs_struct { +- int users; ++ atomic_t users; + rwlock_t lock; + int umask; + int in_exec; +diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h +index 7be0c6f..2f63a2b 100644 +--- a/include/linux/fscache-cache.h ++++ b/include/linux/fscache-cache.h +@@ -116,7 +116,7 @@ struct fscache_operation { + #endif + }; + +-extern atomic_t fscache_op_debug_id; ++extern atomic_unchecked_t fscache_op_debug_id; + extern const struct slow_work_ops fscache_op_slow_work_ops; + + extern void fscache_enqueue_operation(struct fscache_operation *); +@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, + fscache_operation_release_t 
release) + { + atomic_set(&op->usage, 1); +- op->debug_id = atomic_inc_return(&fscache_op_debug_id); ++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); + op->release = release; + INIT_LIST_HEAD(&op->pend_link); + fscache_set_op_state(op, "Init"); +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h +index 4d6f47b..00bcedb 100644 +--- a/include/linux/fsnotify_backend.h ++++ b/include/linux/fsnotify_backend.h +@@ -86,6 +86,7 @@ struct fsnotify_ops { + void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group); + void (*free_event_priv)(struct fsnotify_event_private_data *priv); + }; ++typedef struct fsnotify_ops __no_const fsnotify_ops_no_const; + + /* + * A group is a "thing" that wants to receive notification about filesystem +diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h +index 4ec5e67..42f1eb9 100644 +--- a/include/linux/ftrace_event.h ++++ b/include/linux/ftrace_event.h +@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call, + int filter_type); + extern int trace_define_common_fields(struct ftrace_event_call *call); + +-#define is_signed_type(type) (((type)(-1)) < 0) ++#define is_signed_type(type) (((type)(-1)) < (type)1) + + int trace_set_clr_event(const char *system, const char *event, int set); + +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index 297df45..b6a74ff 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -161,7 +161,7 @@ struct gendisk { + + struct timer_rand_state *random; + +- atomic_t sync_io; /* RAID */ ++ atomic_unchecked_t sync_io; /* RAID */ + struct work_struct async_notify; + #ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity *integrity; +diff --git a/include/linux/gracl.h b/include/linux/gracl.h +new file mode 100644 +index 0000000..6c51079 +--- /dev/null ++++ b/include/linux/gracl.h +@@ -0,0 +1,320 @@ ++#ifndef GR_ACL_H ++#define GR_ACL_H ++ ++#include <linux/grdefs.h> ++#include <linux/resource.h> ++#include <linux/capability.h> ++#include <linux/dcache.h> ++#include <asm/resource.h> ++ ++/* Major status information */ ++ ++#define GR_VERSION "grsecurity 2.9" ++#define GRSECURITY_VERSION 0x2900 ++ ++enum { ++ GR_SHUTDOWN = 0, ++ GR_ENABLE = 1, ++ GR_SPROLE = 2, ++ GR_RELOAD = 3, ++ GR_SEGVMOD = 4, ++ GR_STATUS = 5, ++ GR_UNSPROLE = 6, ++ GR_PASSSET = 7, ++ GR_SPROLEPAM = 8, ++}; ++ ++/* Password setup definitions ++ * kernel/grhash.c */ ++enum { ++ GR_PW_LEN = 128, ++ GR_SALT_LEN = 16, ++ GR_SHA_LEN = 32, ++}; ++ ++enum { ++ GR_SPROLE_LEN = 64, ++}; ++ ++enum { ++ GR_NO_GLOB = 0, ++ GR_REG_GLOB, ++ GR_CREATE_GLOB ++}; ++ ++#define GR_NLIMITS 32 ++ ++/* Begin Data Structures */ ++ ++struct sprole_pw { ++ unsigned char *rolename; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ ++}; ++ ++struct name_entry { ++ __u32 key; ++ ino_t inode; ++ dev_t device; ++ char *name; ++ __u16 len; ++ __u8 deleted; ++ struct name_entry *prev; ++ struct name_entry *next; ++}; ++ ++struct inodev_entry { ++ struct name_entry *nentry; ++ struct inodev_entry *prev; ++ struct inodev_entry *next; ++}; ++ ++struct acl_role_db { ++ struct acl_role_label **r_hash; ++ __u32 r_size; ++}; ++ ++struct inodev_db { ++ struct inodev_entry **i_hash; ++ __u32 i_size; ++}; ++ ++struct name_db { ++ struct name_entry **n_hash; ++ __u32 n_size; ++}; ++ ++struct crash_uid { ++ uid_t uid; ++ unsigned long expires; ++}; ++ ++struct gr_hash_struct { ++ void **table; ++ void **nametable; 
++ void *first; ++ __u32 table_size; ++ __u32 used_size; ++ int type; ++}; ++ ++/* Userspace Grsecurity ACL data structures */ ++ ++struct acl_subject_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ kernel_cap_t cap_mask; ++ kernel_cap_t cap_lower; ++ kernel_cap_t cap_invert_audit; ++ ++ struct rlimit res[GR_NLIMITS]; ++ __u32 resmask; ++ ++ __u8 user_trans_type; ++ __u8 group_trans_type; ++ uid_t *user_transitions; ++ gid_t *group_transitions; ++ __u16 user_trans_num; ++ __u16 group_trans_num; ++ ++ __u32 sock_families[2]; ++ __u32 ip_proto[8]; ++ __u32 ip_type; ++ struct acl_ip_label **ips; ++ __u32 ip_num; ++ __u32 inaddr_any_override; ++ ++ __u32 crashes; ++ unsigned long expires; ++ ++ struct acl_subject_label *parent_subject; ++ struct gr_hash_struct *hash; ++ struct acl_subject_label *prev; ++ struct acl_subject_label *next; ++ ++ struct acl_object_label **obj_hash; ++ __u32 obj_hash_size; ++ __u16 pax_flags; ++}; ++ ++struct role_allowed_ip { ++ __u32 addr; ++ __u32 netmask; ++ ++ struct role_allowed_ip *prev; ++ struct role_allowed_ip *next; ++}; ++ ++struct role_transition { ++ char *rolename; ++ ++ struct role_transition *prev; ++ struct role_transition *next; ++}; ++ ++struct acl_role_label { ++ char *rolename; ++ uid_t uidgid; ++ __u16 roletype; ++ ++ __u16 auth_attempts; ++ unsigned long expires; ++ ++ struct acl_subject_label *root_label; ++ struct gr_hash_struct *hash; ++ ++ struct acl_role_label *prev; ++ struct acl_role_label *next; ++ ++ struct role_transition *transitions; ++ struct role_allowed_ip *allowed_ips; ++ uid_t *domain_children; ++ __u16 domain_child_num; ++ ++ // __u16 ++ umode_t umask; ++ ++ struct acl_subject_label **subj_hash; ++ __u32 subj_hash_size; ++}; ++ ++struct user_acl_role_db { ++ struct acl_role_label **r_table; ++ __u32 num_pointers; /* Number of allocations to track */ ++ __u32 num_roles; /* Number of roles */ ++ __u32 num_domain_children; /* Number of domain children */ ++ __u32 num_subjects; /* Number of subjects */ ++ __u32 num_objects; /* Number of objects */ ++}; ++ ++struct acl_object_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ ++ struct acl_subject_label *nested; ++ struct acl_object_label *globbed; ++ ++ /* next two structures not used */ ++ ++ struct acl_object_label *prev; ++ struct acl_object_label *next; ++}; ++ ++struct acl_ip_label { ++ char *iface; ++ __u32 addr; ++ __u32 netmask; ++ __u16 low, high; ++ __u8 mode; ++ __u32 type; ++ __u32 proto[8]; ++ ++ /* next two structures not used */ ++ ++ struct acl_ip_label *prev; ++ struct acl_ip_label *next; ++}; ++ ++struct gr_arg { ++ struct user_acl_role_db role_db; ++ unsigned char pw[GR_PW_LEN]; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; ++ unsigned char sp_role[GR_SPROLE_LEN]; ++ struct sprole_pw *sprole_pws; ++ dev_t segv_device; ++ ino_t segv_inode; ++ uid_t segv_uid; ++ __u16 num_sprole_pws; ++ __u16 mode; ++}; ++ ++struct gr_arg_wrapper { ++ struct gr_arg *arg; ++ __u32 version; ++ __u32 size; ++}; ++ ++struct subject_map { ++ struct acl_subject_label *user; ++ struct acl_subject_label *kernel; ++ struct subject_map *prev; ++ struct subject_map *next; ++}; ++ ++struct acl_subj_map_db { ++ struct subject_map **s_hash; ++ __u32 s_size; ++}; ++ ++/* End Data Structures Section */ ++ ++/* Hash functions generated by empirical testing by Brad Spengler ++ Makes good use of the low bits of the inode. Generally 0-1 times ++ in loop for successful match. 0-3 for unsuccessful match. 
++ Shift/add algorithm with modulus of table size and an XOR*/ ++ ++static __inline__ unsigned int ++rhash(const uid_t uid, const __u16 type, const unsigned int sz) ++{ ++ return ((((uid + type) << (16 + type)) ^ uid) % sz); ++} ++ ++ static __inline__ unsigned int ++shash(const struct acl_subject_label *userp, const unsigned int sz) ++{ ++ return ((const unsigned long)userp % sz); ++} ++ ++static __inline__ unsigned int ++fhash(const ino_t ino, const dev_t dev, const unsigned int sz) ++{ ++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz); ++} ++ ++static __inline__ unsigned int ++nhash(const char *name, const __u16 len, const unsigned int sz) ++{ ++ return full_name_hash((const unsigned char *)name, len) % sz; ++} ++ ++#define FOR_EACH_ROLE_START(role) \ ++ role = role_list; \ ++ while (role) { ++ ++#define FOR_EACH_ROLE_END(role) \ ++ role = role->prev; \ ++ } ++ ++#define FOR_EACH_SUBJECT_START(role,subj,iter) \ ++ subj = NULL; \ ++ iter = 0; \ ++ while (iter < role->subj_hash_size) { \ ++ if (subj == NULL) \ ++ subj = role->subj_hash[iter]; \ ++ if (subj == NULL) { \ ++ iter++; \ ++ continue; \ ++ } ++ ++#define FOR_EACH_SUBJECT_END(subj,iter) \ ++ subj = subj->next; \ ++ if (subj == NULL) \ ++ iter++; \ ++ } ++ ++ ++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \ ++ subj = role->hash->first; \ ++ while (subj != NULL) { ++ ++#define FOR_EACH_NESTED_SUBJECT_END(subj) \ ++ subj = subj->next; \ ++ } ++ ++#endif ++ +diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h +new file mode 100644 +index 0000000..323ecf2 +--- /dev/null ++++ b/include/linux/gralloc.h +@@ -0,0 +1,9 @@ ++#ifndef __GRALLOC_H ++#define __GRALLOC_H ++ ++void acl_free_all(void); ++int acl_alloc_stack_init(unsigned long size); ++void *acl_alloc(unsigned long len); ++void *acl_alloc_num(unsigned long num, unsigned long len); ++ ++#endif +diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h +new file mode 100644 +index 0000000..70d6cd5 +--- /dev/null ++++ b/include/linux/grdefs.h +@@ -0,0 +1,140 @@ ++#ifndef GRDEFS_H ++#define GRDEFS_H ++ ++/* Begin grsecurity status declarations */ ++ ++enum { ++ GR_READY = 0x01, ++ GR_STATUS_INIT = 0x00 // disabled state ++}; ++ ++/* Begin ACL declarations */ ++ ++/* Role flags */ ++ ++enum { ++ GR_ROLE_USER = 0x0001, ++ GR_ROLE_GROUP = 0x0002, ++ GR_ROLE_DEFAULT = 0x0004, ++ GR_ROLE_SPECIAL = 0x0008, ++ GR_ROLE_AUTH = 0x0010, ++ GR_ROLE_NOPW = 0x0020, ++ GR_ROLE_GOD = 0x0040, ++ GR_ROLE_LEARN = 0x0080, ++ GR_ROLE_TPE = 0x0100, ++ GR_ROLE_DOMAIN = 0x0200, ++ GR_ROLE_PAM = 0x0400, ++ GR_ROLE_PERSIST = 0x800 ++}; ++ ++/* ACL Subject and Object mode flags */ ++enum { ++ GR_DELETED = 0x80000000 ++}; ++ ++/* ACL Object-only mode flags */ ++enum { ++ GR_READ = 0x00000001, ++ GR_APPEND = 0x00000002, ++ GR_WRITE = 0x00000004, ++ GR_EXEC = 0x00000008, ++ GR_FIND = 0x00000010, ++ GR_INHERIT = 0x00000020, ++ GR_SETID = 0x00000040, ++ GR_CREATE = 0x00000080, ++ GR_DELETE = 0x00000100, ++ GR_LINK = 0x00000200, ++ GR_AUDIT_READ = 0x00000400, ++ GR_AUDIT_APPEND = 0x00000800, ++ GR_AUDIT_WRITE = 0x00001000, ++ GR_AUDIT_EXEC = 0x00002000, ++ GR_AUDIT_FIND = 0x00004000, ++ GR_AUDIT_INHERIT= 0x00008000, ++ GR_AUDIT_SETID = 0x00010000, ++ GR_AUDIT_CREATE = 0x00020000, ++ GR_AUDIT_DELETE = 0x00040000, ++ GR_AUDIT_LINK = 0x00080000, ++ GR_PTRACERD = 0x00100000, ++ GR_NOPTRACE = 0x00200000, ++ GR_SUPPRESS = 0x00400000, ++ GR_NOLEARN = 0x00800000, ++ GR_INIT_TRANSFER= 0x01000000 ++}; ++ ++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | 
GR_AUDIT_EXEC | \ ++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) ++ ++/* ACL subject-only mode flags */ ++enum { ++ GR_KILL = 0x00000001, ++ GR_VIEW = 0x00000002, ++ GR_PROTECTED = 0x00000004, ++ GR_LEARN = 0x00000008, ++ GR_OVERRIDE = 0x00000010, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_DUMMY = 0x00000020, ++ GR_PROTSHM = 0x00000040, ++ GR_KILLPROC = 0x00000080, ++ GR_KILLIPPROC = 0x00000100, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_NOTROJAN = 0x00000200, ++ GR_PROTPROCFD = 0x00000400, ++ GR_PROCACCT = 0x00000800, ++ GR_RELAXPTRACE = 0x00001000, ++ GR_NESTED = 0x00002000, ++ GR_INHERITLEARN = 0x00004000, ++ GR_PROCFIND = 0x00008000, ++ GR_POVERRIDE = 0x00010000, ++ GR_KERNELAUTH = 0x00020000, ++ GR_ATSECURE = 0x00040000, ++ GR_SHMEXEC = 0x00080000 ++}; ++ ++enum { ++ GR_PAX_ENABLE_SEGMEXEC = 0x0001, ++ GR_PAX_ENABLE_PAGEEXEC = 0x0002, ++ GR_PAX_ENABLE_MPROTECT = 0x0004, ++ GR_PAX_ENABLE_RANDMMAP = 0x0008, ++ GR_PAX_ENABLE_EMUTRAMP = 0x0010, ++ GR_PAX_DISABLE_SEGMEXEC = 0x0100, ++ GR_PAX_DISABLE_PAGEEXEC = 0x0200, ++ GR_PAX_DISABLE_MPROTECT = 0x0400, ++ GR_PAX_DISABLE_RANDMMAP = 0x0800, ++ GR_PAX_DISABLE_EMUTRAMP = 0x1000, ++}; ++ ++enum { ++ GR_ID_USER = 0x01, ++ GR_ID_GROUP = 0x02, ++}; ++ ++enum { ++ GR_ID_ALLOW = 0x01, ++ GR_ID_DENY = 0x02, ++}; ++ ++#define GR_CRASH_RES 31 ++#define GR_UIDTABLE_MAX 500 ++ ++/* begin resource learning section */ ++enum { ++ GR_RLIM_CPU_BUMP = 60, ++ GR_RLIM_FSIZE_BUMP = 50000, ++ GR_RLIM_DATA_BUMP = 10000, ++ GR_RLIM_STACK_BUMP = 1000, ++ GR_RLIM_CORE_BUMP = 10000, ++ GR_RLIM_RSS_BUMP = 500000, ++ GR_RLIM_NPROC_BUMP = 1, ++ GR_RLIM_NOFILE_BUMP = 5, ++ GR_RLIM_MEMLOCK_BUMP = 50000, ++ GR_RLIM_AS_BUMP = 500000, ++ GR_RLIM_LOCKS_BUMP = 2, ++ GR_RLIM_SIGPENDING_BUMP = 5, ++ GR_RLIM_MSGQUEUE_BUMP = 10000, ++ GR_RLIM_NICE_BUMP = 1, ++ GR_RLIM_RTPRIO_BUMP = 1, ++ GR_RLIM_RTTIME_BUMP = 1000000 ++}; ++ ++#endif +diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h +new file mode 100644 +index 0000000..3826b91 +--- /dev/null ++++ b/include/linux/grinternal.h +@@ -0,0 +1,219 @@ ++#ifndef __GRINTERNAL_H ++#define __GRINTERNAL_H ++ ++#ifdef CONFIG_GRKERNSEC ++ ++#include <linux/fs.h> ++#include <linux/mnt_namespace.h> ++#include <linux/nsproxy.h> ++#include <linux/gracl.h> ++#include <linux/grdefs.h> ++#include <linux/grmsg.h> ++ ++void gr_add_learn_entry(const char *fmt, ...) 
++ __attribute__ ((format (printf, 1, 2))); ++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode, ++ const struct vfsmount *mnt); ++__u32 gr_check_create(const struct dentry *new_dentry, ++ const struct dentry *parent, ++ const struct vfsmount *mnt, const __u32 mode); ++int gr_check_protected_task(const struct task_struct *task); ++__u32 to_gr_audit(const __u32 reqmode); ++int gr_set_acls(const int type); ++int gr_apply_subject_to_task(struct task_struct *task); ++int gr_acl_is_enabled(void); ++char gr_roletype_to_char(void); ++ ++void gr_handle_alertkill(struct task_struct *task); ++char *gr_to_filename(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename1(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename2(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename3(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++ ++extern int grsec_enable_ptrace_readexec; ++extern int grsec_enable_harden_ptrace; ++extern int grsec_enable_link; ++extern int grsec_enable_fifo; ++extern int grsec_enable_shm; ++extern int grsec_enable_execlog; ++extern int grsec_enable_signal; ++extern int grsec_enable_audit_ptrace; ++extern int grsec_enable_forkfail; ++extern int grsec_enable_time; ++extern int grsec_enable_rofs; ++extern int grsec_enable_chroot_shmat; ++extern int grsec_enable_chroot_mount; ++extern int grsec_enable_chroot_double; ++extern int grsec_enable_chroot_pivot; ++extern int grsec_enable_chroot_chdir; ++extern int grsec_enable_chroot_chmod; ++extern int grsec_enable_chroot_mknod; ++extern int grsec_enable_chroot_fchdir; ++extern int grsec_enable_chroot_nice; ++extern int grsec_enable_chroot_execlog; ++extern int grsec_enable_chroot_caps; ++extern int grsec_enable_chroot_sysctl; ++extern int grsec_enable_chroot_unix; ++extern int grsec_enable_tpe; ++extern int grsec_tpe_gid; ++extern int grsec_enable_tpe_all; ++extern int grsec_enable_tpe_invert; ++extern int grsec_enable_socket_all; ++extern int grsec_socket_all_gid; ++extern int grsec_enable_socket_client; ++extern int grsec_socket_client_gid; ++extern int grsec_enable_socket_server; ++extern int grsec_socket_server_gid; ++extern int grsec_audit_gid; ++extern int grsec_enable_group; ++extern int grsec_enable_audit_textrel; ++extern int grsec_enable_log_rwxmaps; ++extern int grsec_enable_mount; ++extern int grsec_enable_chdir; ++extern int grsec_resource_logging; ++extern int grsec_enable_blackhole; ++extern int grsec_lastack_retries; ++extern int grsec_enable_brute; ++extern int grsec_lock; ++ ++extern spinlock_t grsec_alert_lock; ++extern unsigned long grsec_alert_wtime; ++extern unsigned long grsec_alert_fyet; ++ ++extern spinlock_t grsec_audit_lock; ++ ++extern rwlock_t grsec_exec_file_lock; ++ ++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \ ++ gr_to_filename2((tsk)->exec_file->f_path.dentry, \ ++ (tsk)->exec_file->f_vfsmnt) : "/") ++ ++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \ ++ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \ ++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/") ++ ++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \ ++ gr_to_filename((tsk)->exec_file->f_path.dentry, \ ++ (tsk)->exec_file->f_vfsmnt) : "/") ++ ++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? 
\ ++ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \ ++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/") ++ ++#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted) ++ ++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry) ++ ++#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \ ++ (task)->pid, (cred)->uid, \ ++ (cred)->euid, (cred)->gid, (cred)->egid, \ ++ gr_parent_task_fullpath(task), \ ++ (task)->real_parent->comm, (task)->real_parent->pid, \ ++ (pcred)->uid, (pcred)->euid, \ ++ (pcred)->gid, (pcred)->egid ++ ++#define GR_CHROOT_CAPS {{ \ ++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ ++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ ++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ ++ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \ ++ CAP_TO_MASK(CAP_MAC_ADMIN) }} ++ ++#define security_learn(normal_msg,args...) \ ++({ \ ++ read_lock(&grsec_exec_file_lock); \ ++ gr_add_learn_entry(normal_msg "\n", ## args); \ ++ read_unlock(&grsec_exec_file_lock); \ ++}) ++ ++enum { ++ GR_DO_AUDIT, ++ GR_DONT_AUDIT, ++ GR_DONT_AUDIT_GOOD ++}; ++ ++enum { ++ GR_TTYSNIFF, ++ GR_RBAC, ++ GR_RBAC_STR, ++ GR_STR_RBAC, ++ GR_RBAC_MODE2, ++ GR_RBAC_MODE3, ++ GR_FILENAME, ++ GR_SYSCTL_HIDDEN, ++ GR_NOARGS, ++ GR_ONE_INT, ++ GR_ONE_INT_TWO_STR, ++ GR_ONE_STR, ++ GR_STR_INT, ++ GR_TWO_STR_INT, ++ GR_TWO_INT, ++ GR_TWO_U64, ++ GR_THREE_INT, ++ GR_FIVE_INT_TWO_STR, ++ GR_TWO_STR, ++ GR_THREE_STR, ++ GR_FOUR_STR, ++ GR_STR_FILENAME, ++ GR_FILENAME_STR, ++ GR_FILENAME_TWO_INT, ++ GR_FILENAME_TWO_INT_STR, ++ GR_TEXTREL, ++ GR_PTRACE, ++ GR_RESOURCE, ++ GR_CAP, ++ GR_SIG, ++ GR_SIG2, ++ GR_CRASH1, ++ GR_CRASH2, ++ GR_PSACCT, ++ GR_RWXMAP ++}; ++ ++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str) ++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task) ++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) ++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) ++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) ++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) ++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) ++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) ++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) ++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) ++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2) ++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) ++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) ++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2) ++#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2) ++#define gr_log_int3(audit, msg, num1, num2, num3) 
gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) ++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) ++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2) ++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num) ++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) ++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) ++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) ++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) ++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) ++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) ++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2) ++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) ++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) ++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) ++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr) ++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num) ++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) ++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1) ++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) ++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str) ++ ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...); ++ ++#endif ++ ++#endif +diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h +new file mode 100644 +index 0000000..f885406 +--- /dev/null ++++ b/include/linux/grmsg.h +@@ -0,0 +1,109 @@ ++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " ++#define GR_STOPMOD_MSG "denied modification of module state by " ++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by " ++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by " ++#define GR_IOPERM_MSG "denied use of ioperm() by " ++#define GR_IOPL_MSG "denied use of iopl() by " ++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " ++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by " ++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside 
of chroot by " ++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by " ++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " ++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4" ++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4" ++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " ++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " ++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " ++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " ++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by " ++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " ++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by " ++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against " ++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " ++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " ++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " ++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " ++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " ++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " ++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " ++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " ++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by " ++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " ++#define GR_EXEC_ACL_MSG "%s execution of %.950s by " ++#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by " ++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds" ++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds" ++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by " ++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by " ++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " ++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " ++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " ++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by " ++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by " ++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " ++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by " ++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " ++#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by " ++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " ++#define GR_INITF_ACL_MSG "init_variables() failed %s by " ++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. 
To disable acls at startup use <kernel image name> gracl=off from your boot loader" ++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by " ++#define GR_SHUTS_ACL_MSG "shutdown auth success for " ++#define GR_SHUTF_ACL_MSG "shutdown auth failure for " ++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " ++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " ++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " ++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " ++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " ++#define GR_ENABLEF_ACL_MSG "unable to load %s for " ++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system" ++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " ++#define GR_RELOADF_ACL_MSG "failed reload of %s for " ++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for " ++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " ++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " ++#define GR_SPROLEF_ACL_MSG "special role %s failure for " ++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for " ++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " ++#define GR_INVMODE_ACL_MSG "invalid mode %d by " ++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by " ++#define GR_FAILFORK_MSG "failed fork with errno %s by " ++#define GR_NICE_CHROOT_MSG "denied priority change by " ++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in " ++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " ++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by " ++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by " ++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " ++#define GR_TIME_MSG "time set by " ++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by " ++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " ++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " ++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by " ++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by " ++#define GR_BIND_MSG "denied bind() by " ++#define GR_CONNECT_MSG "denied connect() by " ++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4" ++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " ++#define GR_CAP_ACL_MSG "use of %s denied for " ++#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for " ++#define GR_CAP_ACL_MSG2 "use of %s permitted for " ++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for " ++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for " ++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by " ++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by " ++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by " ++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " ++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by " ++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for " ++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by " ++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of 
%.950s by " ++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " ++#define GR_VM86_MSG "denied use of vm86 by " ++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by " ++#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by " ++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by " ++#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h +new file mode 100644 +index 0000000..c1793ae +--- /dev/null ++++ b/include/linux/grsecurity.h +@@ -0,0 +1,219 @@ ++#ifndef GR_SECURITY_H ++#define GR_SECURITY_H ++#include <linux/fs.h> ++#include <linux/fs_struct.h> ++#include <linux/binfmts.h> ++#include <linux/gracl.h> ++#include <linux/compat.h> ++ ++/* notify of brain-dead configs */ ++#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled." ++#endif ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) ++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled." ++#endif ++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP) ++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled." ++#endif ++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR) ++#error "CONFIG_PAX enabled, but no PaX options are enabled." ++#endif ++ ++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags); ++void gr_handle_brute_check(void); ++void gr_handle_kernel_exploit(void); ++int gr_process_user_ban(void); ++ ++char gr_roletype_to_char(void); ++ ++int gr_acl_enable_at_secure(void); ++ ++int gr_check_user_change(int real, int effective, int fs); ++int gr_check_group_change(int real, int effective, int fs); ++ ++void gr_del_task_from_ip_table(struct task_struct *p); ++ ++int gr_pid_is_chrooted(struct task_struct *p); ++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type); ++int gr_handle_chroot_nice(void); ++int gr_handle_chroot_sysctl(const int op); ++int gr_handle_chroot_setpriority(struct task_struct *p, ++ const int niceval); ++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); ++int gr_handle_chroot_chroot(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_chroot_chdir(struct path *path); ++int gr_handle_chroot_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mknod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mount(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const char *dev_name); ++int gr_handle_chroot_pivot(void); ++int gr_handle_chroot_unix(const pid_t pid); ++ ++int gr_handle_rawio(const struct inode *inode); ++ ++void gr_handle_ioperm(void); ++void gr_handle_iopl(void); ++ ++umode_t gr_acl_umask(void); ++ ++int gr_tpe_allow(const struct file *file); ++ ++void gr_set_chroot_entries(struct task_struct *task, struct path *path); ++void gr_clear_chroot_entries(struct task_struct *task); ++ ++void gr_log_forkfail(const int retval); ++void gr_log_timechange(void); ++void gr_log_signal(const int sig, const void *addr, const 
struct task_struct *t); ++void gr_log_chdir(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_log_chroot_exec(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv); ++#ifdef CONFIG_COMPAT ++void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv); ++#endif ++void gr_log_remount(const char *devname, const int retval); ++void gr_log_unmount(const char *devname, const int retval); ++void gr_log_mount(const char *from, const char *to, const int retval); ++void gr_log_textrel(struct vm_area_struct *vma); ++void gr_log_rwxmmap(struct file *file); ++void gr_log_rwxmprotect(struct file *file); ++ ++int gr_handle_follow_link(const struct inode *parent, ++ const struct inode *inode, ++ const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_fifo(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const struct dentry *dir, const int flag, ++ const int acc_mode); ++int gr_handle_hardlink(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ struct inode *inode, ++ const int mode, const char *to); ++ ++int gr_is_capable(const int cap); ++int gr_is_capable_nolog(const int cap); ++void gr_learn_resource(const struct task_struct *task, const int limit, ++ const unsigned long wanted, const int gt); ++void gr_copy_label(struct task_struct *tsk); ++void gr_handle_crash(struct task_struct *task, const int sig); ++int gr_handle_signal(const struct task_struct *p, const int sig); ++int gr_check_crash_uid(const uid_t uid); ++int gr_check_protected_task(const struct task_struct *task); ++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type); ++int gr_acl_handle_mmap(const struct file *file, ++ const unsigned long prot); ++int gr_acl_handle_mprotect(const struct file *file, ++ const unsigned long prot); ++int gr_check_hidden_task(const struct task_struct *tsk); ++__u32 gr_acl_handle_truncate(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_utime(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_access(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int fmode); ++__u32 gr_acl_handle_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, umode_t *mode); ++__u32 gr_acl_handle_chown(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_setxattr(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_ptrace(struct task_struct *task, const long request); ++int gr_handle_proc_ptrace(struct task_struct *task); ++__u32 gr_acl_handle_execve(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_check_crash_exec(const struct file *filp); ++int gr_acl_is_enabled(void); ++void gr_set_kernel_label(struct task_struct *task); ++void gr_set_role_label(struct task_struct *task, const uid_t uid, ++ const gid_t gid); ++int gr_set_proc_label(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const int unsafe_flags); ++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_open(const struct dentry *dentry, ++ const struct vfsmount *mnt, int acc_mode); ++__u32 gr_acl_handle_creat(const struct dentry *dentry, ++ const struct dentry *p_dentry, ++ const struct vfsmount *p_mnt, ++ int open_flags, int acc_mode, const int imode); ++void gr_handle_create(const struct dentry *dentry, ++ const struct vfsmount 
*mnt); ++void gr_handle_proc_create(const struct dentry *dentry, ++ const struct inode *inode); ++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const int mode); ++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt); ++__u32 gr_acl_handle_rmdir(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_delete(const ino_t ino, const dev_t dev); ++__u32 gr_acl_handle_unlink(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const char *from); ++__u32 gr_acl_handle_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt, const char *to); ++int gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const char *newname); ++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace); ++__u32 gr_check_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt); ++int gr_acl_handle_filldir(const struct file *file, const char *name, ++ const unsigned int namelen, const ino_t ino); ++ ++__u32 gr_acl_handle_unix(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_acl_handle_exit(void); ++void gr_acl_handle_psacct(struct task_struct *task, const long code); ++int gr_acl_handle_procpidmem(const struct task_struct *task); ++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags); ++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode); ++void gr_audit_ptrace(struct task_struct *task); ++dev_t gr_get_dev_from_dentry(struct dentry *dentry); ++ ++int gr_ptrace_readexec(struct file *file, int unsafe_flags); ++ ++#ifdef CONFIG_GRKERNSEC ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p); ++void gr_handle_vm86(void); ++void gr_handle_mem_readwrite(u64 from, u64 to); ++ ++void gr_log_badprocpid(const char *entry); ++ ++extern int grsec_enable_dmesg; ++extern int grsec_disable_privio; ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++extern int grsec_enable_chroot_findtask; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern int grsec_enable_setxid; ++#endif ++#endif ++ ++#endif +diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h +index 6a87154..a3ce57b 100644 +--- a/include/linux/hdpu_features.h ++++ b/include/linux/hdpu_features.h +@@ -3,7 +3,7 @@ + struct cpustate_t { + spinlock_t lock; + int excl; +- int open_count; ++ atomic_t open_count; + unsigned char cached_val; + int inited; + unsigned long *set_addr; +diff --git a/include/linux/highmem.h b/include/linux/highmem.h +index 211ff44..00ab6d7 100644 +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page) + kunmap_atomic(kaddr, KM_USER0); + } + ++static inline void sanitize_highpage(struct page *page) ++{ ++ 
void *kaddr; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ kaddr = kmap_atomic(page, KM_CLEARPAGE); ++ clear_page(kaddr); ++ kunmap_atomic(kaddr, KM_CLEARPAGE); ++ local_irq_restore(flags); ++} ++ + static inline void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +diff --git a/include/linux/i2c.h b/include/linux/i2c.h +index 7b40cda..24eb44e 100644 +--- a/include/linux/i2c.h ++++ b/include/linux/i2c.h +@@ -325,6 +325,7 @@ struct i2c_algorithm { + /* To determine what the adapter supports */ + u32 (*functionality) (struct i2c_adapter *); + }; ++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const; + + /* + * i2c_adapter is the structure used to identify a physical i2c bus along +diff --git a/include/linux/i2o.h b/include/linux/i2o.h +index 4c4e57d..f3c5303 100644 +--- a/include/linux/i2o.h ++++ b/include/linux/i2o.h +@@ -564,7 +564,7 @@ struct i2o_controller { + struct i2o_device *exec; /* Executive */ + #if BITS_PER_LONG == 64 + spinlock_t context_list_lock; /* lock for context_list */ +- atomic_t context_list_counter; /* needed for unique contexts */ ++ atomic_unchecked_t context_list_counter; /* needed for unique contexts */ + struct list_head context_list; /* list of context id's + and pointers */ + #endif +diff --git a/include/linux/init_task.h b/include/linux/init_task.h +index 21a6f5d..dc42eab 100644 +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h +@@ -83,6 +83,12 @@ extern struct group_info init_groups; + #define INIT_IDS + #endif + ++#ifdef CONFIG_X86 ++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO, ++#else ++#define INIT_TASK_THREAD_INFO ++#endif ++ + #ifdef CONFIG_SECURITY_FILE_CAPABILITIES + /* + * Because of the reduced scope of CAP_SETPCAP when filesystem +@@ -156,6 +162,7 @@ extern struct cred init_cred; + __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ + .comm = "swapper", \ + .thread = INIT_THREAD, \ ++ INIT_TASK_THREAD_INFO \ + .fs = &init_fs, \ + .files = &init_files, \ + .signal = &init_signals, \ +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 4f0a72a..a849599 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -296,7 +296,7 @@ struct iommu_flush { + u8 fm, u64 type); + void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +-}; ++} __no_const; + + enum { + SR_DMAR_FECTL_REG, +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index c739150..be577b5 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -369,7 +369,7 @@ enum + /* map softirq index to softirq name. update 'softirq_to_name' in + * kernel/softirq.c when adding a new softirq. + */ +-extern char *softirq_to_name[NR_SOFTIRQS]; ++extern const char * const softirq_to_name[NR_SOFTIRQS]; + + /* softirq mask and active fields moved to irq_cpustat_t in + * asm/hardirq.h to get better cache usage. 
KAO +@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS]; + + struct softirq_action + { +- void (*action)(struct softirq_action *); ++ void (*action)(void); + }; + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +-extern void open_softirq(int nr, void (*action)(struct softirq_action *)); ++extern void open_softirq(int nr, void (*action)(void)); + extern void softirq_init(void); + #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) + extern void raise_softirq_irqoff(unsigned int nr); +diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h +index eb73632..19abfc1 100644 +--- a/include/linux/iocontext.h ++++ b/include/linux/iocontext.h +@@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) + return NULL; + } + ++struct task_struct; + #ifdef CONFIG_BLOCK + int put_io_context(struct io_context *ioc); +-void exit_io_context(void); ++void exit_io_context(struct task_struct *task); + struct io_context *get_io_context(gfp_t gfp_flags, int node); + struct io_context *alloc_io_context(gfp_t gfp_flags, int node); + void copy_io_context(struct io_context **pdst, struct io_context **psrc); + #else +-static inline void exit_io_context(void) ++static inline void exit_io_context(struct task_struct *task) + { + } + +diff --git a/include/linux/irq.h b/include/linux/irq.h +index 9e5f45a..025865b 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); + static inline bool alloc_desc_masks(struct irq_desc *desc, int node, + bool boot) + { ++#ifdef CONFIG_CPUMASK_OFFSTACK + gfp_t gfp = GFP_ATOMIC; + + if (boot) + gfp = GFP_NOWAIT; + +-#ifdef CONFIG_CPUMASK_OFFSTACK + if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) + return false; + +diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h +index 7922742..27306a2 100644 +--- a/include/linux/kallsyms.h ++++ b/include/linux/kallsyms.h +@@ -15,7 +15,8 @@ + + struct module; + +-#ifdef CONFIG_KALLSYMS ++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS) ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + /* Lookup the address for a symbol. Returns 0 if not found. */ + unsigned long kallsyms_lookup_name(const char *name); + +@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u + /* Stupid that this does nothing, but I didn't create this mess. */ + #define __print_symbol(fmt, addr) + #endif /*CONFIG_KALLSYMS*/ ++#else /* when included by kallsyms.c, vsnprintf.c, or ++ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */ ++extern void __print_symbol(const char *fmt, unsigned long address); ++extern int sprint_symbol(char *buffer, unsigned long address); ++const char *kallsyms_lookup(unsigned long addr, ++ unsigned long *symbolsize, ++ unsigned long *offset, ++ char **modname, char *namebuf); ++#endif + + /* This macro allows us to keep printk typechecking */ + static void __check_printsym_format(const char *fmt, ...) 
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h +index 6adcc29..13369e8 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -74,8 +74,8 @@ void kgdb_breakpoint(void); + + extern int kgdb_connected; + +-extern atomic_t kgdb_setting_breakpoint; +-extern atomic_t kgdb_cpu_doing_single_step; ++extern atomic_unchecked_t kgdb_setting_breakpoint; ++extern atomic_unchecked_t kgdb_cpu_doing_single_step; + + extern struct task_struct *kgdb_usethread; + extern struct task_struct *kgdb_contthread; +@@ -235,7 +235,7 @@ struct kgdb_arch { + int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); + void (*remove_all_hw_break)(void); + void (*correct_hw_break)(void); +-}; ++} __do_const; + + /** + * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. +@@ -257,14 +257,14 @@ struct kgdb_io { + int (*init) (void); + void (*pre_exception) (void); + void (*post_exception) (void); +-}; ++} __do_const; + +-extern struct kgdb_arch arch_kgdb_ops; ++extern const struct kgdb_arch arch_kgdb_ops; + + extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); + +-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); +-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); ++extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops); ++extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops); + + extern int kgdb_hex2long(char **ptr, unsigned long *long_val); + extern int kgdb_mem2hex(char *mem, char *buf, int count); +diff --git a/include/linux/kmod.h b/include/linux/kmod.h +index 0546fe7..2a22bc1 100644 +--- a/include/linux/kmod.h ++++ b/include/linux/kmod.h +@@ -31,6 +31,8 @@ + * usually useless though. */ + extern int __request_module(bool wait, const char *name, ...) \ + __attribute__((format(printf, 2, 3))); ++extern int ___request_module(bool wait, char *param_name, const char *name, ...) \ ++ __attribute__((format(printf, 3, 4))); + #define request_module(mod...) __request_module(true, mod) + #define request_module_nowait(mod...) __request_module(false, mod) + #define try_then_request_module(x, mod...) \ +diff --git a/include/linux/kobject.h b/include/linux/kobject.h +index 58ae8e0..3950d3c 100644 +--- a/include/linux/kobject.h ++++ b/include/linux/kobject.h +@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); + + struct kobj_type { + void (*release)(struct kobject *kobj); +- struct sysfs_ops *sysfs_ops; ++ const struct sysfs_ops *sysfs_ops; + struct attribute **default_attrs; + }; + +@@ -118,9 +118,9 @@ struct kobj_uevent_env { + }; + + struct kset_uevent_ops { +- int (*filter)(struct kset *kset, struct kobject *kobj); +- const char *(*name)(struct kset *kset, struct kobject *kobj); +- int (*uevent)(struct kset *kset, struct kobject *kobj, ++ int (* const filter)(struct kset *kset, struct kobject *kobj); ++ const char *(* const name)(struct kset *kset, struct kobject *kobj); ++ int (* const uevent)(struct kset *kset, struct kobject *kobj, + struct kobj_uevent_env *env); + }; + +@@ -132,7 +132,7 @@ struct kobj_attribute { + const char *buf, size_t count); + }; + +-extern struct sysfs_ops kobj_sysfs_ops; ++extern const struct sysfs_ops kobj_sysfs_ops; + + /** + * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. 
+@@ -155,14 +155,14 @@ struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; +- struct kset_uevent_ops *uevent_ops; ++ const struct kset_uevent_ops *uevent_ops; + }; + + extern void kset_init(struct kset *kset); + extern int __must_check kset_register(struct kset *kset); + extern void kset_unregister(struct kset *kset); + extern struct kset * __must_check kset_create_and_add(const char *name, +- struct kset_uevent_ops *u, ++ const struct kset_uevent_ops *u, + struct kobject *parent_kobj); + + static inline struct kset *to_kset(struct kobject *kobj) +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index c728a50..752d821 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); + void vcpu_load(struct kvm_vcpu *vcpu); + void vcpu_put(struct kvm_vcpu *vcpu); + +-int kvm_init(void *opaque, unsigned int vcpu_size, ++int kvm_init(const void *opaque, unsigned int vcpu_size, + struct module *module); + void kvm_exit(void); + +@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg); + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); + +-int kvm_arch_init(void *opaque); ++int kvm_arch_init(const void *opaque); + void kvm_arch_exit(void); + + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +diff --git a/include/linux/libata.h b/include/linux/libata.h +index a069916..223edde 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -525,11 +525,11 @@ struct ata_ioports { + + struct ata_host { + spinlock_t lock; +- struct device *dev; ++ struct device *dev; + void __iomem * const *iomap; + unsigned int n_ports; + void *private_data; +- struct ata_port_operations *ops; ++ const struct ata_port_operations *ops; + unsigned long flags; + #ifdef CONFIG_ATA_ACPI + acpi_handle acpi_handle; +@@ -710,7 +710,7 @@ struct ata_link { + + struct ata_port { + struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ +- struct ata_port_operations *ops; ++ const struct ata_port_operations *ops; + spinlock_t *lock; + /* Flags owned by the EH context. Only EH should touch these once the + port is active */ +@@ -884,7 +884,7 @@ struct ata_port_operations { + * fields must be pointers. 
+ */ + const struct ata_port_operations *inherits; +-}; ++} __do_const; + + struct ata_port_info { + unsigned long flags; +@@ -892,7 +892,7 @@ struct ata_port_info { + unsigned long pio_mask; + unsigned long mwdma_mask; + unsigned long udma_mask; +- struct ata_port_operations *port_ops; ++ const struct ata_port_operations *port_ops; + void *private_data; + }; + +@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[]; + extern const unsigned long sata_deb_timing_hotplug[]; + extern const unsigned long sata_deb_timing_long[]; + +-extern struct ata_port_operations ata_dummy_port_ops; ++extern const struct ata_port_operations ata_dummy_port_ops; + extern const struct ata_port_info ata_dummy_port_info; + + static inline const unsigned long * +@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq, + struct scsi_host_template *sht); + extern void ata_host_detach(struct ata_host *host); + extern void ata_host_init(struct ata_host *, struct device *, +- unsigned long, struct ata_port_operations *); ++ unsigned long, const struct ata_port_operations *); + extern int ata_scsi_detect(struct scsi_host_template *sht); + extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); + extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); +diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h +index fbc48f8..0886e57 100644 +--- a/include/linux/lockd/bind.h ++++ b/include/linux/lockd/bind.h +@@ -23,13 +23,13 @@ struct svc_rqst; + * This is the set of functions for lockd->nfsd communication + */ + struct nlmsvc_binding { +- __be32 (*fopen)(struct svc_rqst *, ++ __be32 (* const fopen)(struct svc_rqst *, + struct nfs_fh *, + struct file **); +- void (*fclose)(struct file *); ++ void (* const fclose)(struct file *); + }; + +-extern struct nlmsvc_binding * nlmsvc_ops; ++extern const struct nlmsvc_binding * nlmsvc_ops; + + /* + * Similar to nfs_client_initdata, but without the NFS-specific +diff --git a/include/linux/mca.h b/include/linux/mca.h +index 3797270..7765ede 100644 +--- a/include/linux/mca.h ++++ b/include/linux/mca.h +@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions { + int region); + void * (*mca_transform_memory)(struct mca_device *, + void *memory); +-}; ++} __no_const; + + struct mca_bus { + u64 default_dma_mask; +diff --git a/include/linux/memory.h b/include/linux/memory.h +index 37fa19b..b597c85 100644 +--- a/include/linux/memory.h ++++ b/include/linux/memory.h +@@ -108,7 +108,7 @@ struct memory_accessor { + size_t count); + ssize_t (*write)(struct memory_accessor *, const char *buf, + off_t offset, size_t count); +-}; ++} __no_const; + + /* + * Kernel text modification mutex, used for code patching. 
Users of this lock +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 11e5be6..8ff8c91 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp); + + #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ + #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */ ++#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */ ++#else + #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ ++#endif ++ + #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ + #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ + +@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page); + int set_page_dirty_lock(struct page *page); + int clear_page_dirty_for_io(struct page *page); + +-/* Is the vma a continuation of the stack vma above it? */ +-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +-} +- + extern unsigned long move_page_tables(struct vm_area_struct *vma, + unsigned long old_addr, struct vm_area_struct *new_vma, + unsigned long new_addr, unsigned long len); +@@ -890,6 +891,8 @@ struct shrinker { + extern void register_shrinker(struct shrinker *); + extern void unregister_shrinker(struct shrinker *); + ++pgprot_t vm_get_page_prot(unsigned long vm_flags); ++ + int vma_wants_writenotify(struct vm_area_struct *vma); + + extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); +@@ -900,8 +903,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, + { + return 0; + } ++ ++static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, ++ unsigned long address) ++{ ++ return 0; ++} + #else + int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); ++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address); + #endif + + #ifdef __PAGETABLE_PMD_FOLDED +@@ -910,8 +920,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, + { + return 0; + } ++ ++static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, ++ unsigned long address) ++{ ++ return 0; ++} + #else + int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); ++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address); + #endif + + int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); +@@ -928,11 +945,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a + NULL: pud_offset(pgd, address); + } + ++static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address) ++{ ++ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))? ++ NULL: pud_offset(pgd, address); ++} ++ + static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) + { + return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? + NULL: pmd_offset(pud, address); + } ++ ++static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address) ++{ ++ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))? 
++ NULL: pmd_offset(pud, address); ++} + #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ + + #if USE_SPLIT_PTLOCKS +@@ -1162,6 +1191,7 @@ out: + } + + extern int do_munmap(struct mm_struct *, unsigned long, size_t); ++extern int __do_munmap(struct mm_struct *, unsigned long, size_t); + + extern unsigned long do_brk(unsigned long, unsigned long); + +@@ -1218,6 +1248,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add + extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + ++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma); ++extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma); ++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl); ++ + /* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. */ + static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +@@ -1234,7 +1268,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma) + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + } + +-pgprot_t vm_get_page_prot(unsigned long vm_flags); + struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); + int remap_pfn_range(struct vm_area_struct *, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t); +@@ -1332,7 +1365,13 @@ extern void memory_failure(unsigned long pfn, int trapno); + extern int __memory_failure(unsigned long pfn, int trapno, int ref); + extern int sysctl_memory_failure_early_kill; + extern int sysctl_memory_failure_recovery; +-extern atomic_long_t mce_bad_pages; ++extern atomic_long_unchecked_t mce_bad_pages; ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot); ++#else ++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {} ++#endif + + #endif /* __KERNEL__ */ + #endif /* _LINUX_MM_H */ +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 9d12ed5..c5e5ab6 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -186,6 +186,8 @@ struct vm_area_struct { + #ifdef CONFIG_NUMA + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ + #endif ++ ++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */ + }; + + struct core_thread { +@@ -235,7 +237,7 @@ struct mm_struct { + unsigned long total_vm, locked_vm, shared_vm, exec_vm; + unsigned long stack_vm, reserved_vm, def_flags, nr_ptes; + unsigned long start_code, end_code, start_data, end_data; +- unsigned long start_brk, brk, start_stack; ++ unsigned long brk_gap, start_brk, brk, start_stack; + unsigned long arg_start, arg_end, env_start, env_end; + + unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ +@@ -287,6 +289,24 @@ struct mm_struct { + #ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; + #endif ++ ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++ unsigned long pax_flags; ++#endif ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ unsigned long call_dl_resolve; ++#endif ++ ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) 
++ unsigned long call_syscall; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ unsigned long delta_mmap; /* randomized offset */ ++ unsigned long delta_stack; /* randomized offset */ ++#endif ++ + }; + + /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ +diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h +index 4e02ee2..afb159e 100644 +--- a/include/linux/mmu_notifier.h ++++ b/include/linux/mmu_notifier.h +@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) + */ + #define ptep_clear_flush_notify(__vma, __address, __ptep) \ + ({ \ +- pte_t __pte; \ ++ pte_t ___pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ +- __pte = ptep_clear_flush(___vma, ___address, __ptep); \ ++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ +- __pte; \ ++ ___pte; \ + }) + + #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index 6c31a2a..4b0e930 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -350,7 +350,7 @@ struct zone { + unsigned long flags; /* zone flags, see below */ + + /* Zone statistics */ +- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + /* + * prev_priority holds the scanning priority for this zone. It is +diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h +index f58e9d8..3503935 100644 +--- a/include/linux/mod_devicetable.h ++++ b/include/linux/mod_devicetable.h +@@ -12,7 +12,7 @@ + typedef unsigned long kernel_ulong_t; + #endif + +-#define PCI_ANY_ID (~0) ++#define PCI_ANY_ID ((__u16)~0) + + struct pci_device_id { + __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ +@@ -131,7 +131,7 @@ struct usb_device_id { + #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100 + #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 + +-#define HID_ANY_ID (~0) ++#define HID_ANY_ID (~0U) + + struct hid_device_id { + __u16 bus; +diff --git a/include/linux/module.h b/include/linux/module.h +index 482efc8..642032b 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -16,6 +16,7 @@ + #include <linux/kobject.h> + #include <linux/moduleparam.h> + #include <linux/tracepoint.h> ++#include <linux/fs.h> + + #include <asm/local.h> + #include <asm/module.h> +@@ -287,16 +288,16 @@ struct module + int (*init)(void); + + /* If this is non-NULL, vfree after init() returns */ +- void *module_init; ++ void *module_init_rx, *module_init_rw; + + /* Here is the actual code + data, vfree'd on unload. */ +- void *module_core; ++ void *module_core_rx, *module_core_rw; + + /* Here are the sizes of the init and core sections */ +- unsigned int init_size, core_size; ++ unsigned int init_size_rw, core_size_rw; + + /* The size of the executable code in each section. 
*/ +- unsigned int init_text_size, core_text_size; ++ unsigned int init_size_rx, core_size_rx; + + /* Arch-specific module values */ + struct mod_arch_specific arch; +@@ -345,6 +346,10 @@ struct module + #ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call *trace_events; + unsigned int num_trace_events; ++ struct file_operations trace_id; ++ struct file_operations trace_enable; ++ struct file_operations trace_format; ++ struct file_operations trace_filter; + #endif + #ifdef CONFIG_FTRACE_MCOUNT_RECORD + unsigned long *ftrace_callsites; +@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr); + bool is_module_address(unsigned long addr); + bool is_module_text_address(unsigned long addr); + ++static inline int within_module_range(unsigned long addr, void *start, unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (ktla_ktva(addr) >= (unsigned long)start && ++ ktla_ktva(addr) < (unsigned long)start + size) ++ return 1; ++#endif ++ ++ return ((void *)addr >= start && (void *)addr < start + size); ++} ++ ++static inline int within_module_core_rx(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx); ++} ++ ++static inline int within_module_core_rw(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw); ++} ++ ++static inline int within_module_init_rx(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx); ++} ++ ++static inline int within_module_init_rw(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw); ++} ++ + static inline int within_module_core(unsigned long addr, struct module *mod) + { +- return (unsigned long)mod->module_core <= addr && +- addr < (unsigned long)mod->module_core + mod->core_size; ++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod); + } + + static inline int within_module_init(unsigned long addr, struct module *mod) + { +- return (unsigned long)mod->module_init <= addr && +- addr < (unsigned long)mod->module_init + mod->init_size; ++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod); + } + + /* Search for module by name: must hold module_mutex. */ +diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h +index c1f40c2..f26b93c 100644 +--- a/include/linux/moduleloader.h ++++ b/include/linux/moduleloader.h +@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); + sections. Returns NULL on failure. */ + void *module_alloc(unsigned long size); + ++#ifdef CONFIG_PAX_KERNEXEC ++void *module_alloc_exec(unsigned long size) __size_overflow(1); ++#else ++#define module_alloc_exec(x) module_alloc(x) ++#endif ++ + /* Free memory returned from module_alloc. */ + void module_free(struct module *mod, void *module_region); + ++#ifdef CONFIG_PAX_KERNEXEC ++void module_free_exec(struct module *mod, void *module_region); ++#else ++#define module_free_exec(x, y) module_free((x), (y)) ++#endif ++ + /* Apply the given relocation to the (simplified) ELF. Return -error + or 0. 
*/ + int apply_relocate(Elf_Shdr *sechdrs, +diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h +index 82a9124..8a5f622 100644 +--- a/include/linux/moduleparam.h ++++ b/include/linux/moduleparam.h +@@ -132,7 +132,7 @@ struct kparam_array + + /* Actually copy string: maxlen param is usually sizeof(string). */ + #define module_param_string(name, string, len, perm) \ +- static const struct kparam_string __param_string_##name \ ++ static const struct kparam_string __param_string_##name __used \ + = { len, string }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + param_set_copystring, param_get_string, \ +@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp); + + /* Comma-separated array: *nump is set to number they actually specified. */ + #define module_param_array_named(name, array, type, nump, perm) \ +- static const struct kparam_array __param_arr_##name \ ++ static const struct kparam_array __param_arr_##name __used \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ + sizeof(array[0]), array }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ +diff --git a/include/linux/mutex.h b/include/linux/mutex.h +index 878cab4..c92cb3e 100644 +--- a/include/linux/mutex.h ++++ b/include/linux/mutex.h +@@ -51,7 +51,7 @@ struct mutex { + spinlock_t wait_lock; + struct list_head wait_list; + #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) +- struct thread_info *owner; ++ struct task_struct *owner; + #endif + #ifdef CONFIG_DEBUG_MUTEXES + const char *name; +diff --git a/include/linux/namei.h b/include/linux/namei.h +index ec0f607..d19e675 100644 +--- a/include/linux/namei.h ++++ b/include/linux/namei.h +@@ -22,7 +22,7 @@ struct nameidata { + unsigned int flags; + int last_type; + unsigned depth; +- char *saved_names[MAX_NESTED_LINKS + 1]; ++ const char *saved_names[MAX_NESTED_LINKS + 1]; + + /* Intent data */ + union { +@@ -84,12 +84,12 @@ extern int follow_up(struct path *); + extern struct dentry *lock_rename(struct dentry *, struct dentry *); + extern void unlock_rename(struct dentry *, struct dentry *); + +-static inline void nd_set_link(struct nameidata *nd, char *path) ++static inline void nd_set_link(struct nameidata *nd, const char *path) + { + nd->saved_names[nd->depth] = path; + } + +-static inline char *nd_get_link(struct nameidata *nd) ++static inline const char *nd_get_link(const struct nameidata *nd) + { + return nd->saved_names[nd->depth]; + } +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 9d7e8f7..04428c5 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -637,6 +637,7 @@ struct net_device_ops { + u16 xid); + #endif + }; ++typedef struct net_device_ops __no_const net_device_ops_no_const; + + /* + * The DEVICE structure. 
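The typedef added to netdevice.h above is the generic opt-out this patch uses for constification: ops structures are treated as read-only by default, and the __no_const alias marks the rare instance that must stay writable. A minimal sketch of both sides, with hypothetical driver names:

#include <linux/netdevice.h>

static int foo_open(struct net_device *dev) { return 0; }
static int foo_stop(struct net_device *dev) { return 0; }

/* common case: the table is fixed at build time and can live read-only */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
};

/* rare case: fields are only known at probe time, so the writable alias
 * (exempted from constification) is used instead */
static net_device_ops_no_const bar_netdev_ops;

static void bar_probe_setup(int can_stop)
{
	bar_netdev_ops.ndo_open = foo_open;
	if (can_stop)
		bar_netdev_ops.ndo_stop = foo_stop;
	/* dev->netdev_ops = &bar_netdev_ops; would follow as usual */
}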
+diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h +new file mode 100644 +index 0000000..33f4af8 +--- /dev/null ++++ b/include/linux/netfilter/xt_gradm.h +@@ -0,0 +1,9 @@ ++#ifndef _LINUX_NETFILTER_XT_GRADM_H ++#define _LINUX_NETFILTER_XT_GRADM_H 1 ++ ++struct xt_gradm_mtinfo { ++ __u16 flags; ++ __u16 invflags; ++}; ++ ++#endif +diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h +index b359c4a..c08b334 100644 +--- a/include/linux/nodemask.h ++++ b/include/linux/nodemask.h +@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state) + + #define any_online_node(mask) \ + ({ \ +- int node; \ +- for_each_node_mask(node, (mask)) \ +- if (node_online(node)) \ ++ int __node; \ ++ for_each_node_mask(__node, (mask)) \ ++ if (node_online(__node)) \ + break; \ +- node; \ ++ __node; \ + }) + + #define num_online_nodes() num_node_state(N_ONLINE) +diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h +index 5171639..7cf4235 100644 +--- a/include/linux/oprofile.h ++++ b/include/linux/oprofile.h +@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, + int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, + char const * name, ulong * val); + +-/** Create a file for read-only access to an atomic_t. */ ++/** Create a file for read-only access to an atomic_unchecked_t. */ + int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, +- char const * name, atomic_t * val); ++ char const * name, atomic_unchecked_t * val); + + /** create a directory */ + struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root, +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index 3c62ed4..8924c7c 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) + if (((unsigned long)uaddr & PAGE_MASK) != + ((unsigned long)end & PAGE_MASK)) + ret = __get_user(c, end); ++ (void)c; + } ++ (void)c; + return ret; + } + +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index 81c9689..a567a55 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -476,7 +476,7 @@ struct hw_perf_event { + struct hrtimer hrtimer; + }; + }; +- atomic64_t prev_count; ++ atomic64_unchecked_t prev_count; + u64 sample_period; + u64 last_period; + atomic64_t period_left; +@@ -557,7 +557,7 @@ struct perf_event { + const struct pmu *pmu; + + enum perf_event_active_state state; +- atomic64_t count; ++ atomic64_unchecked_t count; + + /* + * These are the total time in nanoseconds that the event +@@ -595,8 +595,8 @@ struct perf_event { + * These accumulate total time (in nanoseconds) that children + * events have been enabled and running, respectively. 
+ */ +- atomic64_t child_total_time_enabled; +- atomic64_t child_total_time_running; ++ atomic64_unchecked_t child_total_time_enabled; ++ atomic64_unchecked_t child_total_time_running; + + /* + * Protect attach/detach and child_list: +diff --git a/include/linux/personality.h b/include/linux/personality.h +index 1261208..ddef96fb 100644 +--- a/include/linux/personality.h ++++ b/include/linux/personality.h +@@ -43,6 +43,7 @@ enum { + #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \ + ADDR_NO_RANDOMIZE | \ + ADDR_COMPAT_LAYOUT | \ ++ ADDR_LIMIT_3GB | \ + MMAP_PAGE_ZERO) + + /* +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h +index b43a9e0..b77d869 100644 +--- a/include/linux/pipe_fs_i.h ++++ b/include/linux/pipe_fs_i.h +@@ -46,9 +46,9 @@ struct pipe_inode_info { + wait_queue_head_t wait; + unsigned int nrbufs, curbuf; + struct page *tmp_page; +- unsigned int readers; +- unsigned int writers; +- unsigned int waiting_writers; ++ atomic_t readers; ++ atomic_t writers; ++ atomic_t waiting_writers; + unsigned int r_counter; + unsigned int w_counter; + struct fasync_struct *fasync_readers; +diff --git a/include/linux/poison.h b/include/linux/poison.h +index 34066ff..e95d744 100644 +--- a/include/linux/poison.h ++++ b/include/linux/poison.h +@@ -19,8 +19,8 @@ + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) +-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) ++#define LIST_POISON1 ((void *) (long)0xFFFFFF01) ++#define LIST_POISON2 ((void *) (long)0xFFFFFF02) + + /********** include/linux/timer.h **********/ + /* +diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h +index 4f71bf4..cd2f68e 100644 +--- a/include/linux/posix-timers.h ++++ b/include/linux/posix-timers.h +@@ -82,7 +82,8 @@ struct k_clock { + #define TIMER_RETRY 1 + void (*timer_get) (struct k_itimer * timr, + struct itimerspec * cur_setting); +-}; ++} __do_const; ++typedef struct k_clock __no_const k_clock_no_const; + + void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock); + +diff --git a/include/linux/preempt.h b/include/linux/preempt.h +index 72b1a10..13303a9 100644 +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -110,7 +110,7 @@ struct preempt_ops { + void (*sched_in)(struct preempt_notifier *notifier, int cpu); + void (*sched_out)(struct preempt_notifier *notifier, + struct task_struct *next); +-}; ++} __no_const; + + /** + * preempt_notifier - key for installing preemption notifiers +diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h +index af7c36a..a93005c 100644 +--- a/include/linux/prefetch.h ++++ b/include/linux/prefetch.h +@@ -11,6 +11,7 @@ + #define _LINUX_PREFETCH_H + + #include <linux/types.h> ++#include <linux/const.h> + #include <asm/processor.h> + #include <asm/cache.h> + +diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h +index 379eaed..1bf73e3 100644 +--- a/include/linux/proc_fs.h ++++ b/include/linux/proc_fs.h +@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode, + return proc_create_data(name, mode, parent, proc_fops, NULL); + } + ++static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode, ++ struct proc_dir_entry *parent, const struct file_operations *proc_fops) ++{ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL); ++#elif 
defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL); ++#else ++ return proc_create_data(name, mode, parent, proc_fops, NULL); ++#endif ++} ++ ++ + static inline struct proc_dir_entry *create_proc_read_entry(const char *name, + mode_t mode, struct proc_dir_entry *base, + read_proc_t *read_proc, void * data) +@@ -256,7 +269,7 @@ union proc_op { + int (*proc_show)(struct seq_file *m, + struct pid_namespace *ns, struct pid *pid, + struct task_struct *task); +-}; ++} __no_const; + + struct ctl_table_header; + struct ctl_table; +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h +index 7456d7d..6c1cfc9 100644 +--- a/include/linux/ptrace.h ++++ b/include/linux/ptrace.h +@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child); + extern void exit_ptrace(struct task_struct *tracer); + #define PTRACE_MODE_READ 1 + #define PTRACE_MODE_ATTACH 2 +-/* Returns 0 on success, -errno on denial. */ +-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode); + /* Returns true on success, false on denial. */ + extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); ++/* Returns true on success, false on denial. */ ++extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode); + + static inline int ptrace_reparented(struct task_struct *child) + { +diff --git a/include/linux/random.h b/include/linux/random.h +index 2948046..3262567 100644 +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l + u32 random32(void); + void srandom32(u32 seed); + ++static inline unsigned long pax_get_random_long(void) ++{ ++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0); ++} ++ + #endif /* __KERNEL___ */ + + #endif /* _LINUX_RANDOM_H */ +diff --git a/include/linux/reboot.h b/include/linux/reboot.h +index 988e55f..17cb4ef 100644 +--- a/include/linux/reboot.h ++++ b/include/linux/reboot.h +@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *); + * Architecture-specific implementations of sys_reboot commands. + */ + +-extern void machine_restart(char *cmd); +-extern void machine_halt(void); +-extern void machine_power_off(void); ++extern void machine_restart(char *cmd) __noreturn; ++extern void machine_halt(void) __noreturn; ++extern void machine_power_off(void) __noreturn; + + extern void machine_shutdown(void); + struct pt_regs; +@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *); + */ + + extern void kernel_restart_prepare(char *cmd); +-extern void kernel_restart(char *cmd); +-extern void kernel_halt(void); +-extern void kernel_power_off(void); ++extern void kernel_restart(char *cmd) __noreturn; ++extern void kernel_halt(void) __noreturn; ++extern void kernel_power_off(void) __noreturn; + + void ctrl_alt_del(void); + +@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force); + * Emergency restart, callable from an interrupt handler. 
+ */ + +-extern void emergency_restart(void); ++extern void emergency_restart(void) __noreturn; + #include <asm/emergency-restart.h> + + #endif +diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h +index dd31e7b..5b03c5c 100644 +--- a/include/linux/reiserfs_fs.h ++++ b/include/linux/reiserfs_fs.h +@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode) + #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */ + + #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter) +-#define get_generation(s) atomic_read (&fs_generation(s)) ++#define get_generation(s) atomic_read_unchecked (&fs_generation(s)) + #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen) + #define __fs_changed(gen,s) (gen != get_generation (s)) + #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);}) +@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi) + */ + + struct item_operations { +- int (*bytes_number) (struct item_head * ih, int block_size); +- void (*decrement_key) (struct cpu_key *); +- int (*is_left_mergeable) (struct reiserfs_key * ih, ++ int (* const bytes_number) (struct item_head * ih, int block_size); ++ void (* const decrement_key) (struct cpu_key *); ++ int (* const is_left_mergeable) (struct reiserfs_key * ih, + unsigned long bsize); +- void (*print_item) (struct item_head *, char *item); +- void (*check_item) (struct item_head *, char *item); ++ void (* const print_item) (struct item_head *, char *item); ++ void (* const check_item) (struct item_head *, char *item); + +- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi, ++ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi, + int is_affected, int insert_size); +- int (*check_left) (struct virtual_item * vi, int free, ++ int (* const check_left) (struct virtual_item * vi, int free, + int start_skip, int end_skip); +- int (*check_right) (struct virtual_item * vi, int free); +- int (*part_size) (struct virtual_item * vi, int from, int to); +- int (*unit_num) (struct virtual_item * vi); +- void (*print_vi) (struct virtual_item * vi); ++ int (* const check_right) (struct virtual_item * vi, int free); ++ int (* const part_size) (struct virtual_item * vi, int from, int to); ++ int (* const unit_num) (struct virtual_item * vi); ++ void (* const print_vi) (struct virtual_item * vi); + }; + +-extern struct item_operations *item_ops[TYPE_ANY + 1]; ++extern const struct item_operations * const item_ops[TYPE_ANY + 1]; + + #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize) + #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize) +diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h +index dab68bb..0688727 100644 +--- a/include/linux/reiserfs_fs_sb.h ++++ b/include/linux/reiserfs_fs_sb.h +@@ -377,7 +377,7 @@ struct reiserfs_sb_info { + /* Comment? -Hans */ + wait_queue_head_t s_wait; + /* To be obsoleted soon by per buffer seals.. -Hans */ +- atomic_t s_generation_counter; // increased by one every time the ++ atomic_unchecked_t s_generation_counter; // increased by one every time the + // tree gets re-balanced + unsigned long s_properties; /* File system properties. 
Currently holds + on-disk FS format */ +diff --git a/include/linux/relay.h b/include/linux/relay.h +index 14a86bc..17d0700 100644 +--- a/include/linux/relay.h ++++ b/include/linux/relay.h +@@ -159,7 +159,7 @@ struct rchan_callbacks + * The callback should return 0 if successful, negative if not. + */ + int (*remove_buf_file)(struct dentry *dentry); +-}; ++} __no_const; + + /* + * CONFIG_RELAY kernel API, kernel/relay.c +diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h +index 3392c59..a746428 100644 +--- a/include/linux/rfkill.h ++++ b/include/linux/rfkill.h +@@ -144,6 +144,7 @@ struct rfkill_ops { + void (*query)(struct rfkill *rfkill, void *data); + int (*set_block)(void *data, bool blocked); + }; ++typedef struct rfkill_ops __no_const rfkill_ops_no_const; + + #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) + /** +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 71849bf..8cf9dd2 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -101,6 +101,7 @@ struct bio; + struct fs_struct; + struct bts_context; + struct perf_event_context; ++struct linux_binprm; + + /* + * List of flags we want to share for kernel threads, +@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout); + extern signed long schedule_timeout_uninterruptible(signed long timeout); + asmlinkage void __schedule(void); + asmlinkage void schedule(void); +-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); ++extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); + + struct nsproxy; + struct user_namespace; +@@ -371,9 +372,12 @@ struct user_namespace; + #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + + extern int sysctl_max_map_count; ++extern unsigned long sysctl_heap_stack_gap; + + #include <linux/aio.h> + ++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len); ++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len); + extern unsigned long + arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +@@ -666,6 +670,16 @@ struct signal_struct { + struct tty_audit_buf *tty_audit_buf; + #endif + ++#ifdef CONFIG_GRKERNSEC ++ u32 curr_ip; ++ u32 saved_ip; ++ u32 gr_saddr; ++ u32 gr_daddr; ++ u16 gr_sport; ++ u16 gr_dport; ++ u8 used_accept:1; ++#endif ++ + int oom_adj; /* OOM kill score adjustment (bit shift) */ + }; + +@@ -723,6 +737,11 @@ struct user_struct { + struct key *session_keyring; /* UID's default session keyring */ + #endif + ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++ unsigned int banned; ++ unsigned long ban_expires; ++#endif ++ + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + uid_t uid; +@@ -1328,8 +1347,8 @@ struct task_struct { + struct list_head thread_group; + + struct completion *vfork_done; /* for vfork() */ +- int __user *set_child_tid; /* CLONE_CHILD_SETTID */ +- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ ++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */ ++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + + cputime_t utime, stime, utimescaled, stimescaled; + cputime_t gtime; +@@ -1343,16 +1362,6 @@ struct task_struct { + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; + +-/* process credentials */ +- const struct cred *real_cred; /* objective and real subjective task +- * credentials (COW) */ +- 
const struct cred *cred; /* effective (overridable) subjective task +- * credentials (COW) */ +- struct mutex cred_guard_mutex; /* guard against foreign influences on +- * credential calculations +- * (notably. ptrace) */ +- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ +- + char comm[TASK_COMM_LEN]; /* executable name excluding path + - access with [gs]et_task_comm (which lock + it with task_lock()) +@@ -1369,6 +1378,10 @@ struct task_struct { + #endif + /* CPU-specific state of this task */ + struct thread_struct thread; ++/* thread_info moved to task_struct */ ++#ifdef CONFIG_X86 ++ struct thread_info tinfo; ++#endif + /* filesystem information */ + struct fs_struct *fs; + /* open file information */ +@@ -1436,6 +1449,15 @@ struct task_struct { + int hardirq_context; + int softirq_context; + #endif ++ ++/* process credentials */ ++ const struct cred *real_cred; /* objective and real subjective task ++ * credentials (COW) */ ++ struct mutex cred_guard_mutex; /* guard against foreign influences on ++ * credential calculations ++ * (notably. ptrace) */ ++ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ ++ + #ifdef CONFIG_LOCKDEP + # define MAX_LOCK_DEPTH 48UL + u64 curr_chain_key; +@@ -1456,6 +1478,9 @@ struct task_struct { + + struct backing_dev_info *backing_dev_info; + ++ const struct cred *cred; /* effective (overridable) subjective task ++ * credentials (COW) */ ++ + struct io_context *io_context; + + unsigned long ptrace_message; +@@ -1519,6 +1544,27 @@ struct task_struct { + unsigned long default_timer_slack_ns; + + struct list_head *scm_work_list; ++ ++#ifdef CONFIG_GRKERNSEC ++ /* grsecurity */ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ u64 exec_id; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ const struct cred *delayed_cred; ++#endif ++ struct dentry *gr_chroot_dentry; ++ struct acl_subject_label *acl; ++ struct acl_role_label *role; ++ struct file *exec_file; ++ u16 acl_role_id; ++ /* is this the task that authenticated to the special role */ ++ u8 acl_sp_role; ++ u8 is_writable; ++ u8 brute; ++ u8 gr_is_chrooted; ++#endif ++ + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* Index of current stored adress in ret_stack */ + int curr_ret_stack; +@@ -1542,6 +1588,57 @@ struct task_struct { + #endif /* CONFIG_TRACING */ + }; + ++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */ ++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ ++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ ++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ ++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */ ++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ ++ ++#ifdef CONFIG_PAX_SOFTMODE ++extern int pax_softmode; ++#endif ++ ++extern int pax_check_flags(unsigned long *); ++ ++/* if tsk != current then task_lock must be held on it */ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline unsigned long pax_get_flags(struct task_struct *tsk) ++{ ++ if (likely(tsk->mm)) ++ return tsk->mm->pax_flags; ++ else ++ return 0UL; ++} ++ ++/* if tsk != current then task_lock must be held on it */ ++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags) ++{ ++ if (likely(tsk->mm)) { ++ tsk->mm->pax_flags = flags; ++ return 0; ++ } ++ return -EINVAL; ++} ++#endif ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++extern void pax_set_initial_flags(struct linux_binprm *bprm); ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++extern 
void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++#endif ++ ++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); ++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp); ++extern void pax_report_refcount_overflow(struct pt_regs *regs); ++extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type); ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++extern void pax_track_stack(void); ++#else ++static inline void pax_track_stack(void) {} ++#endif ++ + /* Future-safe accessor for struct task_struct's cpus_allowed. */ + #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) + +@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * + #define PF_DUMPCORE 0x00000200 /* dumped core */ + #define PF_SIGNALED 0x00000400 /* killed by a signal */ + #define PF_MEMALLOC 0x00000800 /* Allocating memory */ +-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ ++#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ + #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ + #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ + #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ +@@ -1978,7 +2075,9 @@ void yield(void); + extern struct exec_domain default_exec_domain; + + union thread_union { ++#ifndef CONFIG_X86 + struct thread_info thread_info; ++#endif + unsigned long stack[THREAD_SIZE/sizeof(long)]; + }; + +@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns; + */ + + extern struct task_struct *find_task_by_vpid(pid_t nr); ++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr); + extern struct task_struct *find_task_by_pid_ns(pid_t nr, + struct pid_namespace *ns); + +@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *); + extern void exit_itimers(struct signal_struct *); + extern void flush_itimer_signals(void); + +-extern NORET_TYPE void do_group_exit(int); ++extern __noreturn void do_group_exit(int); + + extern void daemonize(const char *, ...); + extern int allow_signal(int); +@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p) + + #endif + +-static inline int object_is_on_stack(void *obj) ++static inline int object_starts_on_stack(void *obj) + { +- void *stack = task_stack_page(current); ++ const void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); + } + ++#ifdef CONFIG_PAX_USERCOPY ++extern int object_is_on_stack(const void *obj, unsigned long len); ++#endif ++ + extern void thread_info_cache_init(void); + + #ifdef CONFIG_DEBUG_STACK_USAGE +@@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit) + return task_rlimit_max(current, limit); + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++DECLARE_PER_CPU(u64, exec_counter); ++static inline void increment_exec_counter(void) ++{ ++ unsigned int cpu; ++ u64 *exec_id_ptr; ++ BUILD_BUG_ON(NR_CPUS > (1 << 16)); ++ cpu = get_cpu(); ++ exec_id_ptr = &per_cpu(exec_counter, cpu); ++ *exec_id_ptr += 1ULL << 16; ++ current->exec_id = *exec_id_ptr; ++ put_cpu(); ++} ++#else ++static inline void increment_exec_counter(void) {} ++#endif ++ + #endif /* __KERNEL__ */ + + #endif +diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h +index 1ee2c05..81b7ec4 100644 +--- a/include/linux/screen_info.h ++++ b/include/linux/screen_info.h +@@ -42,7 +42,8 
@@ struct screen_info { + __u16 pages; /* 0x32 */ + __u16 vesa_attributes; /* 0x34 */ + __u32 capabilities; /* 0x36 */ +- __u8 _reserved[6]; /* 0x3a */ ++ __u16 vesapm_size; /* 0x3a */ ++ __u8 _reserved[4]; /* 0x3c */ + } __attribute__((packed)); + + #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ +diff --git a/include/linux/security.h b/include/linux/security.h +index d40d23f..d739b08 100644 +--- a/include/linux/security.h ++++ b/include/linux/security.h +@@ -34,6 +34,7 @@ + #include <linux/key.h> + #include <linux/xfrm.h> + #include <linux/gfp.h> ++#include <linux/grsecurity.h> + #include <net/flow.h> + + /* Maximum number of letters for an LSM name string */ +@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, + extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); + extern int cap_task_setioprio(struct task_struct *p, int ioprio); + extern int cap_task_setnice(struct task_struct *p, int nice); +-extern int cap_syslog(int type); ++extern int cap_syslog(int type, bool from_file); + extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); + + struct msghdr; +@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) + * logging to the console. + * See the syslog(2) manual page for an explanation of the @type values. + * @type contains the type of action. ++ * @from_file indicates the context of action (if it came from /proc). + * Return 0 if permission is granted. + * @settime: + * Check permission to change the system time. +@@ -1445,7 +1447,7 @@ struct security_operations { + int (*sysctl) (struct ctl_table *table, int op); + int (*quotactl) (int cmds, int type, int id, struct super_block *sb); + int (*quota_on) (struct dentry *dentry); +- int (*syslog) (int type); ++ int (*syslog) (int type, bool from_file); + int (*settime) (struct timespec *ts, struct timezone *tz); + int (*vm_enough_memory) (struct mm_struct *mm, long pages); + +@@ -1740,7 +1742,7 @@ int security_acct(struct file *file); + int security_sysctl(struct ctl_table *table, int op); + int security_quotactl(int cmds, int type, int id, struct super_block *sb); + int security_quota_on(struct dentry *dentry); +-int security_syslog(int type); ++int security_syslog(int type, bool from_file); + int security_settime(struct timespec *ts, struct timezone *tz); + int security_vm_enough_memory(long pages); + int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); +@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry) + return 0; + } + +-static inline int security_syslog(int type) ++static inline int security_syslog(int type, bool from_file) + { +- return cap_syslog(type); ++ return cap_syslog(type, from_file); + } + + static inline int security_settime(struct timespec *ts, struct timezone *tz) +diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h +index 8366d8f..cc5f9d6 100644 +--- a/include/linux/seq_file.h ++++ b/include/linux/seq_file.h +@@ -23,6 +23,9 @@ struct seq_file { + u64 version; + struct mutex lock; + const struct seq_operations *op; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ u64 exec_id; ++#endif + void *private; + }; + +@@ -32,6 +35,7 @@ struct seq_operations { + void * (*next) (struct seq_file *m, void *v, loff_t *pos); + int (*show) (struct seq_file *m, void *v); + }; ++typedef struct seq_operations __no_const seq_operations_no_const; + + #define SEQ_SKIP 1 + +diff --git a/include/linux/shm.h b/include/linux/shm.h +index 
eca6235..c7417ed 100644 +--- a/include/linux/shm.h ++++ b/include/linux/shm.h +@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */ + pid_t shm_cprid; + pid_t shm_lprid; + struct user_struct *mlock_user; ++#ifdef CONFIG_GRKERNSEC ++ time_t shm_createtime; ++ pid_t shm_lapid; ++#endif + }; + + /* shm_mode upper byte flags */ +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index bcdd660..fd2e332 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -14,6 +14,7 @@ + #ifndef _LINUX_SKBUFF_H + #define _LINUX_SKBUFF_H + ++#include <linux/const.h> + #include <linux/kernel.h> + #include <linux/kmemcheck.h> + #include <linux/compiler.h> +@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb) + */ + static inline int skb_queue_empty(const struct sk_buff_head *list) + { +- return list->next == (struct sk_buff *)list; ++ return list->next == (const struct sk_buff *)list; + } + + /** +@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) + static inline bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) + { +- return (skb->next == (struct sk_buff *) list); ++ return (skb->next == (const struct sk_buff *) list); + } + + /** +@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list, + static inline bool skb_queue_is_first(const struct sk_buff_head *list, + const struct sk_buff *skb) + { +- return (skb->prev == (struct sk_buff *) list); ++ return (skb->prev == (const struct sk_buff *) list); + } + + /** +@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb) + * headroom, you should not reduce this. + */ + #ifndef NET_SKB_PAD +-#define NET_SKB_PAD 32 ++#define NET_SKB_PAD (_AC(32,UL)) + #endif + + extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); +@@ -1489,6 +1490,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, + return __netdev_alloc_skb(dev, length, GFP_ATOMIC); + } + ++static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length, gfp_t gfp) ++{ ++ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); ++ ++ if (NET_IP_ALIGN && skb) ++ skb_reserve(skb, NET_IP_ALIGN); ++ return skb; ++} ++ ++static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length) ++{ ++ return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); ++} ++ + extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask); + + /** +diff --git a/include/linux/slab.h b/include/linux/slab.h +index 2da8372..96b37db 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -11,12 +11,20 @@ + + #include <linux/gfp.h> + #include <linux/types.h> ++#include <linux/err.h> + + /* + * Flags to pass to kmem_cache_create(). + * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. + */ + #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ ++ ++#ifdef CONFIG_PAX_USERCOPY ++#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */ ++#else ++#define SLAB_USERCOPY 0x00000000UL ++#endif ++ + #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ + #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ + #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ +@@ -82,10 +90,13 @@ + * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. 
+ * Both make kfree a no-op. + */ +-#define ZERO_SIZE_PTR ((void *)16) ++#define ZERO_SIZE_PTR \ ++({ \ ++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\ ++ (void *)(-MAX_ERRNO-1L); \ ++}) + +-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ +- (unsigned long)ZERO_SIZE_PTR) ++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1) + + /* + * struct kmem_cache related prototypes +@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t); + void kfree(const void *); + void kzfree(const void *); + size_t ksize(const void *); ++void check_object_size(const void *ptr, unsigned long n, bool to); + + /* + * Allocator specific definitions. These are mainly used to establish optimized +@@ -263,7 +275,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, + * request comes from. + */ + #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) +-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); ++extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1); + #define kmalloc_track_caller(size, flags) \ + __kmalloc_track_caller(size, flags, _RET_IP_) + #else +@@ -281,7 +293,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); + * allocation request comes from. + */ + #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) +-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); ++extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1); + #define kmalloc_node_track_caller(size, flags, node) \ + __kmalloc_node_track_caller(size, flags, node, \ + _RET_IP_) +diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h +index 850d057..6de7888 100644 +--- a/include/linux/slab_def.h ++++ b/include/linux/slab_def.h +@@ -69,10 +69,10 @@ struct kmem_cache { + unsigned long node_allocs; + unsigned long node_frees; + unsigned long node_overflow; +- atomic_t allochit; +- atomic_t allocmiss; +- atomic_t freehit; +- atomic_t freemiss; ++ atomic_unchecked_t allochit; ++ atomic_unchecked_t allocmiss; ++ atomic_unchecked_t freehit; ++ atomic_unchecked_t freemiss; + + /* + * If debugging is enabled, then the allocator can add additional +@@ -108,7 +108,7 @@ struct cache_sizes { + extern struct cache_sizes malloc_sizes[]; + + void *kmem_cache_alloc(struct kmem_cache *, gfp_t); +-void *__kmalloc(size_t size, gfp_t flags); ++void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1); + + #ifdef CONFIG_KMEMTRACE + extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); +@@ -163,7 +163,7 @@ found: + } + + #ifdef CONFIG_NUMA +-extern void *__kmalloc_node(size_t size, gfp_t flags, int node); ++extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + + #ifdef CONFIG_KMEMTRACE +diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h +index 0ec00b3..39cb7fc 100644 +--- a/include/linux/slob_def.h ++++ b/include/linux/slob_def.h +@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, + return kmem_cache_alloc_node(cachep, flags, -1); + } + +-void *__kmalloc_node(size_t size, gfp_t flags, int node); ++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + + static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) + { +@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) + return 
__kmalloc_node(size, flags, -1); + } + ++static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1); + static __always_inline void *__kmalloc(size_t size, gfp_t flags) + { + return kmalloc(size, flags); +diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h +index 5ad70a6..08563d8 100644 +--- a/include/linux/slub_def.h ++++ b/include/linux/slub_def.h +@@ -86,7 +86,7 @@ struct kmem_cache { + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; + gfp_t allocflags; /* gfp flags to use on each alloc */ +- int refcount; /* Refcount for slab cache destroy */ ++ atomic_t refcount; /* Refcount for slab cache destroy */ + void (*ctor)(void *); + int inuse; /* Offset to metadata */ + int align; /* Alignment */ +@@ -145,6 +145,7 @@ extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT]; + * Sorry that the following has to be that ugly but some versions of GCC + * have trouble with constant propagation and loops. + */ ++static __always_inline int kmalloc_index(size_t size) __size_overflow(1); + static __always_inline int kmalloc_index(size_t size) + { + if (!size) +@@ -215,7 +216,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) + #endif + + void *kmem_cache_alloc(struct kmem_cache *, gfp_t); +-void *__kmalloc(size_t size, gfp_t flags); ++void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1); + + #ifdef CONFIG_KMEMTRACE + extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); +@@ -227,6 +228,7 @@ kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) + } + #endif + ++static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1); + static __always_inline void *kmalloc_large(size_t size, gfp_t flags) + { + unsigned int order = get_order(size); +@@ -263,7 +265,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) + } + + #ifdef CONFIG_NUMA +-void *__kmalloc_node(size_t size, gfp_t flags, int node); ++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + + #ifdef CONFIG_KMEMTRACE +diff --git a/include/linux/sonet.h b/include/linux/sonet.h +index 67ad11f..0bbd8af 100644 +--- a/include/linux/sonet.h ++++ b/include/linux/sonet.h +@@ -61,7 +61,7 @@ struct sonet_stats { + #include <asm/atomic.h> + + struct k_sonet_stats { +-#define __HANDLE_ITEM(i) atomic_t i ++#define __HANDLE_ITEM(i) atomic_unchecked_t i + __SONET_ITEMS + #undef __HANDLE_ITEM + }; +diff --git a/include/linux/stddef.h b/include/linux/stddef.h +index 6a40c76..1747b67 100644 +--- a/include/linux/stddef.h ++++ b/include/linux/stddef.h +@@ -3,14 +3,10 @@ + + #include <linux/compiler.h> + ++#ifdef __KERNEL__ ++ + #undef NULL +-#if defined(__cplusplus) +-#define NULL 0 +-#else + #define NULL ((void *)0) +-#endif +- +-#ifdef __KERNEL__ + + enum { + false = 0, +diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h +index 6f52b4d..5500323 100644 +--- a/include/linux/sunrpc/cache.h ++++ b/include/linux/sunrpc/cache.h +@@ -125,7 +125,7 @@ struct cache_detail { + */ + struct cache_req { + struct cache_deferred_req *(*defer)(struct cache_req *req); +-}; ++} __no_const; + /* this must be embedded in a deferred_request that is being + * delayed awaiting cache-fill + */ +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h +index 8ed9642..101ceab 100644 +--- a/include/linux/sunrpc/clnt.h ++++ b/include/linux/sunrpc/clnt.h +@@ -167,9 +167,9 @@ static inline unsigned 
short rpc_get_port(const struct sockaddr *sap) + { + switch (sap->sa_family) { + case AF_INET: +- return ntohs(((struct sockaddr_in *)sap)->sin_port); ++ return ntohs(((const struct sockaddr_in *)sap)->sin_port); + case AF_INET6: +- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); ++ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port); + } + return 0; + } +@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, + static inline bool __rpc_copy_addr4(struct sockaddr *dst, + const struct sockaddr *src) + { +- const struct sockaddr_in *ssin = (struct sockaddr_in *) src; ++ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src; + struct sockaddr_in *dsin = (struct sockaddr_in *) dst; + + dsin->sin_family = ssin->sin_family; +@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa) + if (sa->sa_family != AF_INET6) + return 0; + +- return ((struct sockaddr_in6 *) sa)->sin6_scope_id; ++ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id; + } + + #endif /* __KERNEL__ */ +diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h +index c14fe86..393245e 100644 +--- a/include/linux/sunrpc/svc_rdma.h ++++ b/include/linux/sunrpc/svc_rdma.h +@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord; + extern unsigned int svcrdma_max_requests; + extern unsigned int svcrdma_max_req_size; + +-extern atomic_t rdma_stat_recv; +-extern atomic_t rdma_stat_read; +-extern atomic_t rdma_stat_write; +-extern atomic_t rdma_stat_sq_starve; +-extern atomic_t rdma_stat_rq_starve; +-extern atomic_t rdma_stat_rq_poll; +-extern atomic_t rdma_stat_rq_prod; +-extern atomic_t rdma_stat_sq_poll; +-extern atomic_t rdma_stat_sq_prod; ++extern atomic_unchecked_t rdma_stat_recv; ++extern atomic_unchecked_t rdma_stat_read; ++extern atomic_unchecked_t rdma_stat_write; ++extern atomic_unchecked_t rdma_stat_sq_starve; ++extern atomic_unchecked_t rdma_stat_rq_starve; ++extern atomic_unchecked_t rdma_stat_rq_poll; ++extern atomic_unchecked_t rdma_stat_rq_prod; ++extern atomic_unchecked_t rdma_stat_sq_poll; ++extern atomic_unchecked_t rdma_stat_sq_prod; + + #define RPCRDMA_VERSION 1 + +diff --git a/include/linux/suspend.h b/include/linux/suspend.h +index 5e781d8..1e62818 100644 +--- a/include/linux/suspend.h ++++ b/include/linux/suspend.h +@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t; + * which require special recovery actions in that situation. + */ + struct platform_suspend_ops { +- int (*valid)(suspend_state_t state); +- int (*begin)(suspend_state_t state); +- int (*prepare)(void); +- int (*prepare_late)(void); +- int (*enter)(suspend_state_t state); +- void (*wake)(void); +- void (*finish)(void); +- void (*end)(void); +- void (*recover)(void); ++ int (* const valid)(suspend_state_t state); ++ int (* const begin)(suspend_state_t state); ++ int (* const prepare)(void); ++ int (* const prepare_late)(void); ++ int (* const enter)(suspend_state_t state); ++ void (* const wake)(void); ++ void (* const finish)(void); ++ void (* const end)(void); ++ void (* const recover)(void); + }; + + #ifdef CONFIG_SUSPEND +@@ -120,7 +120,7 @@ struct platform_suspend_ops { + * suspend_set_ops - set platform dependent suspend operations + * @ops: The new suspend operations to set. 
+ */ +-extern void suspend_set_ops(struct platform_suspend_ops *ops); ++extern void suspend_set_ops(const struct platform_suspend_ops *ops); + extern int suspend_valid_only_mem(suspend_state_t state); + + /** +@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state); + #else /* !CONFIG_SUSPEND */ + #define suspend_valid_only_mem NULL + +-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {} ++static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} + static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } + #endif /* !CONFIG_SUSPEND */ + +@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone); + * platforms which require special recovery actions in that situation. + */ + struct platform_hibernation_ops { +- int (*begin)(void); +- void (*end)(void); +- int (*pre_snapshot)(void); +- void (*finish)(void); +- int (*prepare)(void); +- int (*enter)(void); +- void (*leave)(void); +- int (*pre_restore)(void); +- void (*restore_cleanup)(void); +- void (*recover)(void); ++ int (* const begin)(void); ++ void (* const end)(void); ++ int (* const pre_snapshot)(void); ++ void (* const finish)(void); ++ int (* const prepare)(void); ++ int (* const enter)(void); ++ void (* const leave)(void); ++ int (* const pre_restore)(void); ++ void (* const restore_cleanup)(void); ++ void (* const recover)(void); + }; + + #ifdef CONFIG_HIBERNATION +@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *); + extern void swsusp_unset_page_free(struct page *); + extern unsigned long get_safe_page(gfp_t gfp_mask); + +-extern void hibernation_set_ops(struct platform_hibernation_ops *ops); ++extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); + extern int hibernate(void); + extern bool system_entering_hibernation(void); + #else /* CONFIG_HIBERNATION */ +@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } + static inline void swsusp_set_page_free(struct page *p) {} + static inline void swsusp_unset_page_free(struct page *p) {} + +-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} ++static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} + static inline int hibernate(void) { return -ENOSYS; } + static inline bool system_entering_hibernation(void) { return false; } + #endif /* CONFIG_HIBERNATION */ +diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h +index 0eb6942..a805cb6 100644 +--- a/include/linux/sysctl.h ++++ b/include/linux/sysctl.h +@@ -164,7 +164,11 @@ enum + KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ + }; + +- ++#ifdef CONFIG_PAX_SOFTMODE ++enum { ++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */ ++}; ++#endif + + /* CTL_VM names: */ + enum +@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write, + + extern int proc_dostring(struct ctl_table *, int, + void __user *, size_t *, loff_t *); ++extern int proc_dostring_modpriv(struct ctl_table *, int, ++ void __user *, size_t *, loff_t *); + extern int proc_dointvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + extern int proc_dointvec_minmax(struct ctl_table *, int, +@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen, + + extern ctl_handler sysctl_data; + extern ctl_handler sysctl_string; ++extern ctl_handler sysctl_string_modpriv; + extern ctl_handler sysctl_intvec; + extern ctl_handler sysctl_jiffies; + extern ctl_handler sysctl_ms_jiffies; +diff --git 
a/include/linux/sysfs.h b/include/linux/sysfs.h +index 9d68fed..71f02cc 100644 +--- a/include/linux/sysfs.h ++++ b/include/linux/sysfs.h +@@ -75,8 +75,8 @@ struct bin_attribute { + }; + + struct sysfs_ops { +- ssize_t (*show)(struct kobject *, struct attribute *,char *); +- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); ++ ssize_t (* const show)(struct kobject *, struct attribute *,char *); ++ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t); + }; + + struct sysfs_dirent; +diff --git a/include/linux/syslog.h b/include/linux/syslog.h +new file mode 100644 +index 0000000..3891139 +--- /dev/null ++++ b/include/linux/syslog.h +@@ -0,0 +1,52 @@ ++/* Syslog internals ++ * ++ * Copyright 2010 Canonical, Ltd. ++ * Author: Kees Cook <kees.cook@canonical.com> ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. ++ */ ++ ++#ifndef _LINUX_SYSLOG_H ++#define _LINUX_SYSLOG_H ++ ++/* Close the log. Currently a NOP. */ ++#define SYSLOG_ACTION_CLOSE 0 ++/* Open the log. Currently a NOP. */ ++#define SYSLOG_ACTION_OPEN 1 ++/* Read from the log. */ ++#define SYSLOG_ACTION_READ 2 ++/* Read all messages remaining in the ring buffer. */ ++#define SYSLOG_ACTION_READ_ALL 3 ++/* Read and clear all messages remaining in the ring buffer */ ++#define SYSLOG_ACTION_READ_CLEAR 4 ++/* Clear ring buffer. */ ++#define SYSLOG_ACTION_CLEAR 5 ++/* Disable printk's to console */ ++#define SYSLOG_ACTION_CONSOLE_OFF 6 ++/* Enable printk's to console */ ++#define SYSLOG_ACTION_CONSOLE_ON 7 ++/* Set level of messages printed to console */ ++#define SYSLOG_ACTION_CONSOLE_LEVEL 8 ++/* Return number of unread characters in the log buffer */ ++#define SYSLOG_ACTION_SIZE_UNREAD 9 ++/* Return size of the log buffer */ ++#define SYSLOG_ACTION_SIZE_BUFFER 10 ++ ++#define SYSLOG_FROM_CALL 0 ++#define SYSLOG_FROM_FILE 1 ++ ++int do_syslog(int type, char __user *buf, int count, bool from_file); ++ ++#endif /* _LINUX_SYSLOG_H */ +diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h +index a8cc4e1..98d3b85 100644 +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -23,7 +23,7 @@ struct restart_block { + }; + /* For futex_wait and futex_wait_requeue_pi */ + struct { +- u32 *uaddr; ++ u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; +diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h +index 1eb44a9..f582df3 100644 +--- a/include/linux/tracehook.h ++++ b/include/linux/tracehook.h +@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task) + /* + * ptrace report for syscall entry and exit looks identical. 
+ */ +-static inline void ptrace_report_syscall(struct pt_regs *regs) ++static inline int ptrace_report_syscall(struct pt_regs *regs) + { + int ptrace = task_ptrace(current); + + if (!(ptrace & PT_PTRACED)) +- return; ++ return 0; + + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); + +@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs) + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } ++ ++ return fatal_signal_pending(current); + } + + /** +@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs) + static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) + { +- ptrace_report_syscall(regs); +- return 0; ++ return ptrace_report_syscall(regs); + } + + /** +diff --git a/include/linux/tty.h b/include/linux/tty.h +index e9c57e9..ee6d489 100644 +--- a/include/linux/tty.h ++++ b/include/linux/tty.h +@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void); + /* This last one is just for the tty layer internals and shouldn't be used elsewhere */ + extern void tty_ldisc_enable(struct tty_struct *tty); + +- + /* n_tty.c */ + extern struct tty_ldisc_ops tty_ldisc_N_TTY; + +diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h +index 0c4ee9b..9f7c426 100644 +--- a/include/linux/tty_ldisc.h ++++ b/include/linux/tty_ldisc.h +@@ -139,7 +139,7 @@ struct tty_ldisc_ops { + + struct module *owner; + +- int refcount; ++ atomic_t refcount; + }; + + struct tty_ldisc { +diff --git a/include/linux/types.h b/include/linux/types.h +index c42724f..d190eee 100644 +--- a/include/linux/types.h ++++ b/include/linux/types.h +@@ -191,10 +191,26 @@ typedef struct { + volatile int counter; + } atomic_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ volatile int counter; ++} atomic_unchecked_t; ++#else ++typedef atomic_t atomic_unchecked_t; ++#endif ++ + #ifdef CONFIG_64BIT + typedef struct { + volatile long counter; + } atomic64_t; ++ ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ volatile long counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif + #endif + + struct ustat { +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h +index 6b58367..57b150e 100644 +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to, + long ret; \ + mm_segment_t old_fs = get_fs(); \ + \ +- set_fs(KERNEL_DS); \ + pagefault_disable(); \ +- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ ++ set_fs(KERNEL_DS); \ ++ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \ + set_fs(old_fs); \ ++ pagefault_enable(); \ + ret; \ + }) + +@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to, + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ +-extern long probe_kernel_read(void *dst, void *src, size_t size); ++extern long probe_kernel_read(void *dst, const void *src, size_t size); + + /* + * probe_kernel_write(): safely attempt to write to a location +@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size); + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +-extern long probe_kernel_write(void *dst, void *src, size_t size); ++extern long probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3); + + #endif /* __LINUX_UACCESS_H__ */ +diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h +index 99c1b4d..bb94261 100644 +--- a/include/linux/unaligned/access_ok.h ++++ b/include/linux/unaligned/access_ok.h +@@ -6,32 +6,32 @@ + + static inline u16 get_unaligned_le16(const void *p) + { +- return le16_to_cpup((__le16 *)p); ++ return le16_to_cpup((const __le16 *)p); + } + + static inline u32 get_unaligned_le32(const void *p) + { +- return le32_to_cpup((__le32 *)p); ++ return le32_to_cpup((const __le32 *)p); + } + + static inline u64 get_unaligned_le64(const void *p) + { +- return le64_to_cpup((__le64 *)p); ++ return le64_to_cpup((const __le64 *)p); + } + + static inline u16 get_unaligned_be16(const void *p) + { +- return be16_to_cpup((__be16 *)p); ++ return be16_to_cpup((const __be16 *)p); + } + + static inline u32 get_unaligned_be32(const void *p) + { +- return be32_to_cpup((__be32 *)p); ++ return be32_to_cpup((const __be32 *)p); + } + + static inline u64 get_unaligned_be64(const void *p) + { +- return be64_to_cpup((__be64 *)p); ++ return be64_to_cpup((const __be64 *)p); + } + + static inline void put_unaligned_le16(u16 val, void *p) +diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h +index 79b9837..b5a56f9 100644 +--- a/include/linux/vermagic.h ++++ b/include/linux/vermagic.h +@@ -26,9 +26,35 @@ + #define MODULE_ARCH_VERMAGIC "" + #endif + ++#ifdef CONFIG_PAX_REFCOUNT ++#define MODULE_PAX_REFCOUNT "REFCOUNT " ++#else ++#define MODULE_PAX_REFCOUNT "" ++#endif ++ ++#ifdef CONSTIFY_PLUGIN ++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN " ++#else ++#define MODULE_CONSTIFY_PLUGIN "" ++#endif ++ ++#ifdef STACKLEAK_PLUGIN ++#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN " ++#else ++#define MODULE_STACKLEAK_PLUGIN "" ++#endif ++ ++#ifdef CONFIG_GRKERNSEC ++#define MODULE_GRSEC "GRSEC " ++#else ++#define MODULE_GRSEC "" ++#endif ++ + #define VERMAGIC_STRING \ + UTS_RELEASE " " \ + MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ +- MODULE_ARCH_VERMAGIC ++ MODULE_ARCH_VERMAGIC \ ++ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \ ++ MODULE_GRSEC + +diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h +index 819a634..dcc51e98 100644 +--- a/include/linux/vmalloc.h ++++ b/include/linux/vmalloc.h +@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ + #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ + #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ + #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */ ++#endif ++ + /* bits [20..32] reserved for arch specific ioremap internals */ + + /* +@@ -106,8 +111,8 @@ extern struct vm_struct *alloc_vm_area(size_t size); + extern void free_vm_area(struct vm_struct *area); + + /* for /dev/kmem */ +-extern long vread(char *buf, char *addr, unsigned long count); +-extern long vwrite(char *buf, char *addr, unsigned long count); ++extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3); ++extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3); + + /* + * 
Internals. Dont't use.. +diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h +index 13070d6..aa4159a 100644 +--- a/include/linux/vmstat.h ++++ b/include/linux/vmstat.h +@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu) + /* + * Zone based page accounting with per cpu differentials. + */ +-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + static inline void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) + { +- atomic_long_add(x, &zone->vm_stat[item]); +- atomic_long_add(x, &vm_stat[item]); ++ atomic_long_add_unchecked(x, &zone->vm_stat[item]); ++ atomic_long_add_unchecked(x, &vm_stat[item]); + } + + static inline unsigned long global_page_state(enum zone_stat_item item) + { +- long x = atomic_long_read(&vm_stat[item]); ++ long x = atomic_long_read_unchecked(&vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item) + static inline unsigned long zone_page_state(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone, + static inline unsigned long zone_page_state_snapshot(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + + #ifdef CONFIG_SMP + int cpu; +@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone, + + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_inc(&zone->vm_stat[item]); +- atomic_long_inc(&vm_stat[item]); ++ atomic_long_inc_unchecked(&zone->vm_stat[item]); ++ atomic_long_inc_unchecked(&vm_stat[item]); + } + + static inline void __inc_zone_page_state(struct page *page, +@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page, + + static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_dec(&zone->vm_stat[item]); +- atomic_long_dec(&vm_stat[item]); ++ atomic_long_dec_unchecked(&zone->vm_stat[item]); ++ atomic_long_dec_unchecked(&vm_stat[item]); + } + + static inline void __dec_zone_page_state(struct page *page, +diff --git a/include/linux/xattr.h b/include/linux/xattr.h +index 5c84af8..1a3b6e2 100644 +--- a/include/linux/xattr.h ++++ b/include/linux/xattr.h +@@ -33,6 +33,11 @@ + #define XATTR_USER_PREFIX "user." + #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1) + ++/* User namespace */ ++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax." 
++#define XATTR_PAX_FLAGS_SUFFIX "flags" ++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX ++ + struct inode; + struct dentry; + +diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h +index eed5fcc..5080d24 100644 +--- a/include/media/saa7146_vv.h ++++ b/include/media/saa7146_vv.h +@@ -167,7 +167,7 @@ struct saa7146_ext_vv + int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *); + + /* the extension can override this */ +- struct v4l2_ioctl_ops ops; ++ v4l2_ioctl_ops_no_const ops; + /* pointer to the saa7146 core ops */ + const struct v4l2_ioctl_ops *core_ops; + +diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h +index 73c9867..2da8837 100644 +--- a/include/media/v4l2-dev.h ++++ b/include/media/v4l2-dev.h +@@ -34,7 +34,7 @@ struct v4l2_device; + #define V4L2_FL_UNREGISTERED (0) + + struct v4l2_file_operations { +- struct module *owner; ++ struct module * const owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + unsigned int (*poll) (struct file *, struct poll_table_struct *); +@@ -46,6 +46,7 @@ struct v4l2_file_operations { + int (*open) (struct file *); + int (*release) (struct file *); + }; ++typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const; + + /* + * Newer version of video_device, handled by videodev2.c +diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h +index 5d5d550..f559ef1 100644 +--- a/include/media/v4l2-device.h ++++ b/include/media/v4l2-device.h +@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4 + this function returns 0. If the name ends with a digit (e.g. cx18), + then the name will be set to cx18-0 since cx180 looks really odd. */ + int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, +- atomic_t *instance); ++ atomic_unchecked_t *instance); + + /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects. 
+ Since the parent disappears this ensures that v4l2_dev doesn't have an +diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h +index 7a4529d..7244290 100644 +--- a/include/media/v4l2-ioctl.h ++++ b/include/media/v4l2-ioctl.h +@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops { + long (*vidioc_default) (struct file *file, void *fh, + int cmd, void *arg); + }; ++typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const; + + + /* v4l debugging and diagnostics */ +diff --git a/include/net/flow.h b/include/net/flow.h +index 809970b..c3df4f3 100644 +--- a/include/net/flow.h ++++ b/include/net/flow.h +@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family, + extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, + u8 dir, flow_resolve_t resolver); + extern void flow_cache_flush(void); +-extern atomic_t flow_cache_genid; ++extern atomic_unchecked_t flow_cache_genid; + + static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2) + { +diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h +index 15e1f8fe..668837c 100644 +--- a/include/net/inetpeer.h ++++ b/include/net/inetpeer.h +@@ -24,7 +24,7 @@ struct inet_peer + __u32 dtime; /* the time of last use of not + * referenced entries */ + atomic_t refcnt; +- atomic_t rid; /* Frag reception counter */ ++ atomic_unchecked_t rid; /* Frag reception counter */ + __u32 tcp_ts; + unsigned long tcp_ts_stamp; + }; +diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h +index 98978e7..2243a3d 100644 +--- a/include/net/ip_vs.h ++++ b/include/net/ip_vs.h +@@ -365,7 +365,7 @@ struct ip_vs_conn { + struct ip_vs_conn *control; /* Master control connection */ + atomic_t n_control; /* Number of controlled ones */ + struct ip_vs_dest *dest; /* real server */ +- atomic_t in_pkts; /* incoming packet counter */ ++ atomic_unchecked_t in_pkts; /* incoming packet counter */ + + /* packet transmitter for different forwarding methods. 
If it + mangles the packet, it must return NF_DROP or better NF_STOLEN, +@@ -466,7 +466,7 @@ struct ip_vs_dest { + union nf_inet_addr addr; /* IP address of the server */ + __be16 port; /* port number of the server */ + volatile unsigned flags; /* dest status flags */ +- atomic_t conn_flags; /* flags to copy to conn */ ++ atomic_unchecked_t conn_flags; /* flags to copy to conn */ + atomic_t weight; /* server weight */ + + atomic_t refcnt; /* reference counter */ +diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h +index 69b610a..fe3962c 100644 +--- a/include/net/irda/ircomm_core.h ++++ b/include/net/irda/ircomm_core.h +@@ -51,7 +51,7 @@ typedef struct { + int (*connect_response)(struct ircomm_cb *, struct sk_buff *); + int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *, + struct ircomm_info *); +-} call_t; ++} __no_const call_t; + + struct ircomm_cb { + irda_queue_t queue; +diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h +index eea2e61..08c692d 100644 +--- a/include/net/irda/ircomm_tty.h ++++ b/include/net/irda/ircomm_tty.h +@@ -35,6 +35,7 @@ + #include <linux/termios.h> + #include <linux/timer.h> + #include <linux/tty.h> /* struct tty_struct */ ++#include <asm/local.h> + + #include <net/irda/irias_object.h> + #include <net/irda/ircomm_core.h> +@@ -105,8 +106,8 @@ struct ircomm_tty_cb { + unsigned short close_delay; + unsigned short closing_wait; /* time to wait before closing */ + +- int open_count; +- int blocked_open; /* # of blocked opens */ ++ local_t open_count; ++ local_t blocked_open; /* # of blocked opens */ + + /* Protect concurent access to : + * o self->open_count +diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h +index f82a1e8..82d81e8 100644 +--- a/include/net/iucv/af_iucv.h ++++ b/include/net/iucv/af_iucv.h +@@ -87,7 +87,7 @@ struct iucv_sock { + struct iucv_sock_list { + struct hlist_head head; + rwlock_t lock; +- atomic_t autobind_name; ++ atomic_unchecked_t autobind_name; + }; + + unsigned int iucv_sock_poll(struct file *file, struct socket *sock, +diff --git a/include/net/lapb.h b/include/net/lapb.h +index 96cb5dd..25e8d4f 100644 +--- a/include/net/lapb.h ++++ b/include/net/lapb.h +@@ -95,7 +95,7 @@ struct lapb_cb { + struct sk_buff_head write_queue; + struct sk_buff_head ack_queue; + unsigned char window; +- struct lapb_register_struct callbacks; ++ struct lapb_register_struct *callbacks; + + /* FRMR control information */ + struct lapb_frame frmr_data; +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index 3817fda..cdb2343 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -131,7 +131,7 @@ struct neigh_ops + int (*connected_output)(struct sk_buff*); + int (*hh_output)(struct sk_buff*); + int (*queue_xmit)(struct sk_buff*); +-}; ++} __do_const; + + struct pneigh_entry + { +diff --git a/include/net/netlink.h b/include/net/netlink.h +index c344646..4778c71 100644 +--- a/include/net/netlink.h ++++ b/include/net/netlink.h +@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining) + { + return (remaining >= (int) sizeof(struct nlmsghdr) && + nlh->nlmsg_len >= sizeof(struct nlmsghdr) && +- nlh->nlmsg_len <= remaining); ++ nlh->nlmsg_len <= (unsigned int)remaining); + } + + /** +@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb) + static inline void nlmsg_trim(struct sk_buff *skb, const void *mark) + { + if (mark) +- skb_trim(skb, (unsigned char *) mark - skb->data); ++ skb_trim(skb, (const unsigned 
char *) mark - skb->data); + } + + /** +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h +index 9a4b8b7..e49e077 100644 +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -54,7 +54,7 @@ struct netns_ipv4 { + int current_rt_cache_rebuild_count; + + struct timer_list rt_secret_timer; +- atomic_t rt_genid; ++ atomic_unchecked_t rt_genid; + + #ifdef CONFIG_IP_MROUTE + struct sock *mroute_sk; +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h +index 8a6d529..171f401 100644 +--- a/include/net/sctp/sctp.h ++++ b/include/net/sctp/sctp.h +@@ -305,8 +305,8 @@ extern int sctp_debug_flag; + + #else /* SCTP_DEBUG */ + +-#define SCTP_DEBUG_PRINTK(whatever...) +-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) ++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0) ++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0) + #define SCTP_ENABLE_DEBUG + #define SCTP_DISABLE_DEBUG + #define SCTP_ASSERT(expr, str, func) +diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h +index d97f689..f3b90ab 100644 +--- a/include/net/secure_seq.h ++++ b/include/net/secure_seq.h +@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr); + extern __u32 secure_ipv6_id(const __be32 daddr[4]); + extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); + extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +- __be16 dport); ++ __be16 dport); + extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport); + extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, +- __be16 sport, __be16 dport); ++ __be16 sport, __be16 dport); + extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, +- __be16 sport, __be16 dport); ++ __be16 sport, __be16 dport); + extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, +- __be16 sport, __be16 dport); ++ __be16 sport, __be16 dport); + + #endif /* _NET_SECURE_SEQ */ +diff --git a/include/net/sock.h b/include/net/sock.h +index 78adf52..99afd29 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -272,7 +272,7 @@ struct sock { + rwlock_t sk_callback_lock; + int sk_err, + sk_err_soft; +- atomic_t sk_drops; ++ atomic_unchecked_t sk_drops; + unsigned short sk_ack_backlog; + unsigned short sk_max_ack_backlog; + __u32 sk_priority; +@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) + extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); + extern int sock_prot_inuse_get(struct net *net, struct proto *proto); + #else +-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, ++static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, + int inc) + { + } +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 6cfe18b..dd21acb 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -1444,8 +1444,8 @@ enum tcp_seq_states { + struct tcp_seq_afinfo { + char *name; + sa_family_t family; +- struct file_operations seq_fops; +- struct seq_operations seq_ops; ++ file_operations_no_const seq_fops; ++ seq_operations_no_const seq_ops; + }; + + struct tcp_iter_state { +diff --git a/include/net/udp.h b/include/net/udp.h +index f98abd2..b4b042f 100644 +--- a/include/net/udp.h ++++ b/include/net/udp.h +@@ -187,8 +187,8 @@ struct udp_seq_afinfo { + char *name; + sa_family_t family; + struct udp_table *udp_table; +- struct file_operations seq_fops; +- struct seq_operations seq_ops; ++ file_operations_no_const seq_fops; ++ 
seq_operations_no_const seq_ops; + }; + + struct udp_iter_state { +diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h +index cbb822e..e9c1cbe 100644 +--- a/include/rdma/iw_cm.h ++++ b/include/rdma/iw_cm.h +@@ -129,7 +129,7 @@ struct iw_cm_verbs { + int backlog); + + int (*destroy_listen)(struct iw_cm_id *cm_id); +-}; ++} __no_const; + + /** + * iw_create_cm_id - Create an IW CM identifier. +diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h +index 09a124b..caa8ca8 100644 +--- a/include/scsi/libfc.h ++++ b/include/scsi/libfc.h +@@ -675,6 +675,7 @@ struct libfc_function_template { + */ + void (*disc_stop_final) (struct fc_lport *); + }; ++typedef struct libfc_function_template __no_const libfc_function_template_no_const; + + /* information used by the discovery layer */ + struct fc_disc { +@@ -707,7 +708,7 @@ struct fc_lport { + struct fc_disc disc; + + /* Operational Information */ +- struct libfc_function_template tt; ++ libfc_function_template_no_const tt; + u8 link_up; + u8 qfull; + enum fc_lport_state state; +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h +index de8e180..f15e0d7 100644 +--- a/include/scsi/scsi_device.h ++++ b/include/scsi/scsi_device.h +@@ -156,9 +156,9 @@ struct scsi_device { + unsigned int max_device_blocked; /* what device_blocked counts down from */ + #define SCSI_DEFAULT_DEVICE_BLOCKED 3 + +- atomic_t iorequest_cnt; +- atomic_t iodone_cnt; +- atomic_t ioerr_cnt; ++ atomic_unchecked_t iorequest_cnt; ++ atomic_unchecked_t iodone_cnt; ++ atomic_unchecked_t ioerr_cnt; + + struct device sdev_gendev, + sdev_dev; +diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h +index 0b4baba..0106e9e 100644 +--- a/include/scsi/scsi_host.h ++++ b/include/scsi/scsi_host.h +@@ -43,6 +43,12 @@ struct blk_queue_tags; + #define DISABLE_CLUSTERING 0 + #define ENABLE_CLUSTERING 1 + ++enum { ++ SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. 
from sysfs */
++ SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
++ SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshold event */
++};
++
+ struct scsi_host_template {
+ struct module *module;
+ const char *name;
+diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
+index fc50bd6..81ba9cb 100644
+--- a/include/scsi/scsi_transport_fc.h
++++ b/include/scsi/scsi_transport_fc.h
+@@ -708,7 +708,7 @@ struct fc_function_template {
+ unsigned long show_host_system_hostname:1;
+ 
+ unsigned long disable_target_scan:1;
+-};
++} __do_const;
+ 
+ 
+ /**
+diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
+index 3dae3f7..8440d6f 100644
+--- a/include/sound/ac97_codec.h
++++ b/include/sound/ac97_codec.h
+@@ -419,15 +419,15 @@
+ struct snd_ac97;
+ 
+ struct snd_ac97_build_ops {
+- int (*build_3d) (struct snd_ac97 *ac97);
+- int (*build_specific) (struct snd_ac97 *ac97);
+- int (*build_spdif) (struct snd_ac97 *ac97);
+- int (*build_post_spdif) (struct snd_ac97 *ac97);
++ int (* const build_3d) (struct snd_ac97 *ac97);
++ int (* const build_specific) (struct snd_ac97 *ac97);
++ int (* const build_spdif) (struct snd_ac97 *ac97);
++ int (* const build_post_spdif) (struct snd_ac97 *ac97);
+ #ifdef CONFIG_PM
+- void (*suspend) (struct snd_ac97 *ac97);
+- void (*resume) (struct snd_ac97 *ac97);
++ void (* const suspend) (struct snd_ac97 *ac97);
++ void (* const resume) (struct snd_ac97 *ac97);
+ #endif
+- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
++ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
+ };
+ 
+ struct snd_ac97_bus_ops {
+@@ -477,7 +477,7 @@ struct snd_ac97_template {
+ 
+ struct snd_ac97 {
+ /* -- lowlevel (hardware) driver specific -- */
+- struct snd_ac97_build_ops * build_ops;
++ const struct snd_ac97_build_ops * build_ops;
+ void *private_data;
+ void (*private_free) (struct snd_ac97 *ac97);
+ /* --- */
+diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
+index 891cf1a..a94ba2b 100644
+--- a/include/sound/ak4xxx-adda.h
++++ b/include/sound/ak4xxx-adda.h
+@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
+ void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
+ unsigned char val);
+ void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
+-};
++} __no_const;
+ 
+ #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
+ 
+diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
+index 8c05e47..2b5df97 100644
+--- a/include/sound/hwdep.h
++++ b/include/sound/hwdep.h
+@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
+ struct snd_hwdep_dsp_status *status);
+ int (*dsp_load)(struct snd_hwdep *hw,
+ struct snd_hwdep_dsp_image *image);
+-};
++} __no_const;
+ 
+ struct snd_hwdep {
+ struct snd_card *card;
+diff --git a/include/sound/info.h b/include/sound/info.h
+index 112e894..6fda5b5 100644
+--- a/include/sound/info.h
++++ b/include/sound/info.h
+@@ -44,7 +44,7 @@ struct snd_info_entry_text {
+ struct snd_info_buffer *buffer);
+ void (*write)(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer);
+-};
++} __no_const;
+ 
+ struct snd_info_entry_ops {
+ int (*open)(struct snd_info_entry *entry,
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index de6d981..590a550 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -80,6 +80,7 @@ struct snd_pcm_ops {
+ int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
+ int (*ack)(struct snd_pcm_substream *substream);
+ };
++typedef struct snd_pcm_ops __no_const 
snd_pcm_ops_no_const; + + /* + * +diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h +index 736eac7..fe8a80f 100644 +--- a/include/sound/sb16_csp.h ++++ b/include/sound/sb16_csp.h +@@ -139,7 +139,7 @@ struct snd_sb_csp_ops { + int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels); + int (*csp_stop) (struct snd_sb_csp * p); + int (*csp_qsound_transfer) (struct snd_sb_csp * p); +-}; ++} __no_const; + + /* + * CSP private data +diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h +index 444cd6b..3327cc5 100644 +--- a/include/sound/ymfpci.h ++++ b/include/sound/ymfpci.h +@@ -358,7 +358,7 @@ struct snd_ymfpci { + spinlock_t reg_lock; + spinlock_t voice_lock; + wait_queue_head_t interrupt_sleep; +- atomic_t interrupt_sleep_count; ++ atomic_unchecked_t interrupt_sleep_count; + struct snd_info_entry *proc_entry; + const struct firmware *dsp_microcode; + const struct firmware *controller_microcode; +diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h +index b89f9db..f097b38 100644 +--- a/include/trace/events/irq.h ++++ b/include/trace/events/irq.h +@@ -34,7 +34,7 @@ + */ + TRACE_EVENT(irq_handler_entry, + +- TP_PROTO(int irq, struct irqaction *action), ++ TP_PROTO(int irq, const struct irqaction *action), + + TP_ARGS(irq, action), + +@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry, + */ + TRACE_EVENT(irq_handler_exit, + +- TP_PROTO(int irq, struct irqaction *action, int ret), ++ TP_PROTO(int irq, const struct irqaction *action, int ret), + + TP_ARGS(irq, action, ret), + +@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit, + */ + TRACE_EVENT(softirq_entry, + +- TP_PROTO(struct softirq_action *h, struct softirq_action *vec), ++ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec), + + TP_ARGS(h, vec), + +@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry, + */ + TRACE_EVENT(softirq_exit, + +- TP_PROTO(struct softirq_action *h, struct softirq_action *vec), ++ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec), + + TP_ARGS(h, vec), + +diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h +index 0993a22..32ba2fe 100644 +--- a/include/video/uvesafb.h ++++ b/include/video/uvesafb.h +@@ -177,6 +177,7 @@ struct uvesafb_par { + u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */ + u8 pmi_setpal; /* PMI for palette changes */ + u16 *pmi_base; /* protected mode interface location */ ++ u8 *pmi_code; /* protected mode code location */ + void *pmi_start; + void *pmi_pal; + u8 *vbe_state_orig; /* +diff --git a/init/Kconfig b/init/Kconfig +index d72691b..3996e54 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1004,7 +1004,7 @@ config SLUB_DEBUG + + config COMPAT_BRK + bool "Disable heap randomization" +- default y ++ default n + help + Randomizing heap placement makes heap exploits harder, but it + also breaks ancient binaries (including anything libc5 based). 
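The init/* hunks below repeatedly apply one mechanical pattern: early-boot kernel code calls syscall entry points (sys_open, sys_mount, sys_chroot, ...) whose prototypes take __user pointers, passing kernel-space string literals, so each call site gains an explicit cast into the user address space — spelled __force_user by this patch, __force __user in stock sparse terms. A minimal illustrative sketch of the pattern follows; it is not part of the patch, and the helper name open_console_for_init is hypothetical:

/*
 * Sketch only — assumes a 2.6.32-era kernel context with <linux/syscalls.h>
 * and <linux/fcntl.h>, built with sparse address-space checking. The __force
 * cast marks the kernel-to-user address-space crossing as deliberate rather
 * than a bug, which is what each hunk below does at its own call sites.
 */
static long __init open_console_for_init(void)
{
	/* "/dev/console" is a kernel string literal, but sys_open() expects a
	 * __user pointer; the cast silences the sparse address-space warning. */
	return sys_open((const char __force __user *)"/dev/console", O_RDWR, 0);
}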
+diff --git a/init/do_mounts.c b/init/do_mounts.c +index bb008d0..4fa3933 100644 +--- a/init/do_mounts.c ++++ b/init/do_mounts.c +@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page) + + static int __init do_mount_root(char *name, char *fs, int flags, void *data) + { +- int err = sys_mount(name, "/root", fs, flags, data); ++ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data); + if (err) + return err; + +- sys_chdir("/root"); ++ sys_chdir((__force const char __user *)"/root"); + ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev; + printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n", + current->fs->pwd.mnt->mnt_sb->s_type->name, +@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...) + va_start(args, fmt); + vsprintf(buf, fmt, args); + va_end(args); +- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0); ++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0); + if (fd >= 0) { + sys_ioctl(fd, FDEJECT, 0); + sys_close(fd); + } + printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf); +- fd = sys_open("/dev/console", O_RDWR, 0); ++ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0); + if (fd >= 0) { + sys_ioctl(fd, TCGETS, (long)&termios); + termios.c_lflag &= ~ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); +- sys_read(fd, &c, 1); ++ sys_read(fd, (char __user *)&c, 1); + termios.c_lflag |= ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); + sys_close(fd); +@@ -416,6 +416,6 @@ void __init prepare_namespace(void) + mount_root(); + out: + devtmpfs_mount("dev"); +- sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot("."); ++ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((__force char __user *)"."); + } +diff --git a/init/do_mounts.h b/init/do_mounts.h +index f5b978a..a34abde 100644 +--- a/init/do_mounts.h ++++ b/init/do_mounts.h +@@ -15,15 +15,15 @@ extern int root_mountflags; + + static inline int create_dev(char *name, dev_t dev) + { +- sys_unlink(name); +- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); ++ sys_unlink((char __force_user *)name); ++ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev)); + } + + #if BITS_PER_LONG == 32 + static inline u32 bstat(char *name) + { + struct stat64 stat; +- if (sys_stat64(name, &stat) != 0) ++ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0) + return 0; + if (!S_ISBLK(stat.st_mode)) + return 0; +@@ -35,7 +35,7 @@ static inline u32 bstat(char *name) + static inline u32 bstat(char *name) + { + struct stat stat; +- if (sys_newstat(name, &stat) != 0) ++ if (sys_newstat((char __force_user *)name, (struct stat __force_user *)&stat) != 0) + return 0; + if (!S_ISBLK(stat.st_mode)) + return 0; +diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c +index 614241b..4da046b 100644 +--- a/init/do_mounts_initrd.c ++++ b/init/do_mounts_initrd.c +@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell) + sys_close(old_fd);sys_close(root_fd); + sys_close(0);sys_close(1);sys_close(2); + sys_setsid(); +- (void) sys_open("/dev/console",O_RDWR,0); ++ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0); + (void) sys_dup(0); + (void) sys_dup(0); + return kernel_execve(shell, argv, envp_init); +@@ -47,13 +47,13 @@ static void __init handle_initrd(void) + create_dev("/dev/root.old", Root_RAM0); + /* mount initrd on rootfs' /root */ + mount_block_root("/dev/root.old", 
root_mountflags & ~MS_RDONLY); +- sys_mkdir("/old", 0700); +- root_fd = sys_open("/", 0, 0); +- old_fd = sys_open("/old", 0, 0); ++ sys_mkdir((const char __force_user *)"/old", 0700); ++ root_fd = sys_open((const char __force_user *)"/", 0, 0); ++ old_fd = sys_open((const char __force_user *)"/old", 0, 0); + /* move initrd over / and chdir/chroot in initrd root */ +- sys_chdir("/root"); +- sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot("."); ++ sys_chdir((const char __force_user *)"/root"); ++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((const char __force_user *)"."); + + /* + * In case that a resume from disk is carried out by linuxrc or one of +@@ -70,15 +70,15 @@ static void __init handle_initrd(void) + + /* move initrd to rootfs' /old */ + sys_fchdir(old_fd); +- sys_mount("/", ".", NULL, MS_MOVE, NULL); ++ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL); + /* switch root and cwd back to / of rootfs */ + sys_fchdir(root_fd); +- sys_chroot("."); ++ sys_chroot((const char __force_user *)"."); + sys_close(old_fd); + sys_close(root_fd); + + if (new_decode_dev(real_root_dev) == Root_RAM0) { +- sys_chdir("/old"); ++ sys_chdir((const char __force_user *)"/old"); + return; + } + +@@ -86,17 +86,17 @@ static void __init handle_initrd(void) + mount_root(); + + printk(KERN_NOTICE "Trying to move old root to /initrd ... "); +- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL); ++ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL); + if (!error) + printk("okay\n"); + else { +- int fd = sys_open("/dev/root.old", O_RDWR, 0); ++ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0); + if (error == -ENOENT) + printk("/initrd does not exist. Ignored.\n"); + else + printk("failed\n"); + printk(KERN_NOTICE "Unmounting old root\n"); +- sys_umount("/old", MNT_DETACH); ++ sys_umount((char __force_user *)"/old", MNT_DETACH); + printk(KERN_NOTICE "Trying to free ramdisk memory ... "); + if (fd < 0) { + error = fd; +@@ -119,11 +119,11 @@ int __init initrd_load(void) + * mounted in the normal path. + */ + if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { +- sys_unlink("/initrd.image"); ++ sys_unlink((const char __force_user *)"/initrd.image"); + handle_initrd(); + return 1; + } + } +- sys_unlink("/initrd.image"); ++ sys_unlink((const char __force_user *)"/initrd.image"); + return 0; + } +diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c +index 69aebbf..c0bf6a7 100644 +--- a/init/do_mounts_md.c ++++ b/init/do_mounts_md.c +@@ -170,7 +170,7 @@ static void __init md_setup_drive(void) + partitioned ? 
"_d" : "", minor, + md_setup_args[ent].device_names); + +- fd = sys_open(name, 0, 0); ++ fd = sys_open((char __force_user *)name, 0, 0); + if (fd < 0) { + printk(KERN_ERR "md: open failed - cannot start " + "array %s\n", name); +@@ -233,7 +233,7 @@ static void __init md_setup_drive(void) + * array without it + */ + sys_close(fd); +- fd = sys_open(name, 0, 0); ++ fd = sys_open((char __force_user *)name, 0, 0); + sys_ioctl(fd, BLKRRPART, 0); + } + sys_close(fd); +@@ -283,7 +283,7 @@ static void __init autodetect_raid(void) + + wait_for_device_probe(); + +- fd = sys_open("/dev/md0", 0, 0); ++ fd = sys_open((__force char __user *)"/dev/md0", 0, 0); + if (fd >= 0) { + sys_ioctl(fd, RAID_AUTORUN, raid_autopart); + sys_close(fd); +diff --git a/init/initramfs.c b/init/initramfs.c +index 1fd59b8..a01b079 100644 +--- a/init/initramfs.c ++++ b/init/initramfs.c +@@ -74,7 +74,7 @@ static void __init free_hash(void) + } + } + +-static long __init do_utime(char __user *filename, time_t mtime) ++static long __init do_utime(__force char __user *filename, time_t mtime) + { + struct timespec t[2]; + +@@ -109,7 +109,7 @@ static void __init dir_utime(void) + struct dir_entry *de, *tmp; + list_for_each_entry_safe(de, tmp, &dir_list, list) { + list_del(&de->list); +- do_utime(de->name, de->mtime); ++ do_utime((char __force_user *)de->name, de->mtime); + kfree(de->name); + kfree(de); + } +@@ -271,7 +271,7 @@ static int __init maybe_link(void) + if (nlink >= 2) { + char *old = find_link(major, minor, ino, mode, collected); + if (old) +- return (sys_link(old, collected) < 0) ? -1 : 1; ++ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1; + } + return 0; + } +@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode) + { + struct stat st; + +- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) { ++ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) { + if (S_ISDIR(st.st_mode)) +- sys_rmdir(path); ++ sys_rmdir((char __force_user *)path); + else +- sys_unlink(path); ++ sys_unlink((char __force_user *)path); + } + } + +@@ -305,7 +305,7 @@ static int __init do_name(void) + int openflags = O_WRONLY|O_CREAT; + if (ml != 1) + openflags |= O_TRUNC; +- wfd = sys_open(collected, openflags, mode); ++ wfd = sys_open((char __force_user *)collected, openflags, mode); + + if (wfd >= 0) { + sys_fchown(wfd, uid, gid); +@@ -317,17 +317,17 @@ static int __init do_name(void) + } + } + } else if (S_ISDIR(mode)) { +- sys_mkdir(collected, mode); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); ++ sys_mkdir((char __force_user *)collected, mode); ++ sys_chown((char __force_user *)collected, uid, gid); ++ sys_chmod((char __force_user *)collected, mode); + dir_add(collected, mtime); + } else if (S_ISBLK(mode) || S_ISCHR(mode) || + S_ISFIFO(mode) || S_ISSOCK(mode)) { + if (maybe_link() == 0) { +- sys_mknod(collected, mode, rdev); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); +- do_utime(collected, mtime); ++ sys_mknod((char __force_user *)collected, mode, rdev); ++ sys_chown((char __force_user *)collected, uid, gid); ++ sys_chmod((char __force_user *)collected, mode); ++ do_utime((char __force_user *)collected, mtime); + } + } + return 0; +@@ -336,15 +336,15 @@ static int __init do_name(void) + static int __init do_copy(void) + { + if (count >= body_len) { +- sys_write(wfd, victim, body_len); ++ sys_write(wfd, (char __force_user *)victim, body_len); + sys_close(wfd); +- 
do_utime(vcollected, mtime); ++ do_utime((char __force_user *)vcollected, mtime); + kfree(vcollected); + eat(body_len); + state = SkipIt; + return 0; + } else { +- sys_write(wfd, victim, count); ++ sys_write(wfd, (char __force_user *)victim, count); + body_len -= count; + eat(count); + return 1; +@@ -355,9 +355,9 @@ static int __init do_symlink(void) + { + collected[N_ALIGN(name_len) + body_len] = '\0'; + clean_path(collected, 0); +- sys_symlink(collected + N_ALIGN(name_len), collected); +- sys_lchown(collected, uid, gid); +- do_utime(collected, mtime); ++ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected); ++ sys_lchown((char __force_user *)collected, uid, gid); ++ do_utime((char __force_user *)collected, mtime); + state = SkipIt; + next_state = Reset; + return 0; +diff --git a/init/main.c b/init/main.c +index 1eb4bd5..fea5bbe 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { } + #ifdef CONFIG_TC + extern void tc_init(void); + #endif ++extern void grsecurity_init(void); + + enum system_states system_state __read_mostly; + EXPORT_SYMBOL(system_state); +@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str) + + __setup("reset_devices", set_reset_devices); + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++extern char pax_enter_kernel_user[]; ++extern char pax_exit_kernel_user[]; ++extern pgdval_t clone_pgd_mask; ++#endif ++ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF) ++static int __init setup_pax_nouderef(char *str) ++{ ++#ifdef CONFIG_X86_32 ++ unsigned int cpu; ++ struct desc_struct *gdt; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ gdt = get_cpu_gdt_table(cpu); ++ gdt[GDT_ENTRY_KERNEL_DS].type = 3; ++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; ++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf; ++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf; ++ } ++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory"); ++#else ++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1); ++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1); ++ clone_pgd_mask = ~(pgdval_t)0UL; ++#endif ++ ++ return 0; ++} ++early_param("pax_nouderef", setup_pax_nouderef); ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++int pax_softmode; ++ ++static int __init setup_pax_softmode(char *str) ++{ ++ get_option(&str, &pax_softmode); ++ return 1; ++} ++__setup("pax_softmode=", setup_pax_softmode); ++#endif ++ + static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; + char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; + static const char *panic_later, *panic_param; +@@ -705,52 +749,53 @@ int initcall_debug; + core_param(initcall_debug, initcall_debug, bool, 0644); + + static char msgbuf[64]; +-static struct boot_trace_call call; +-static struct boot_trace_ret ret; ++static struct boot_trace_call trace_call; ++static struct boot_trace_ret trace_ret; + + int do_one_initcall(initcall_t fn) + { + int count = preempt_count(); + ktime_t calltime, delta, rettime; ++ const char *msg1 = "", *msg2 = ""; + + if (initcall_debug) { +- call.caller = task_pid_nr(current); +- printk("calling %pF @ %i\n", fn, call.caller); ++ trace_call.caller = task_pid_nr(current); ++ printk("calling %pF @ %i\n", fn, trace_call.caller); + calltime = ktime_get(); +- trace_boot_call(&call, fn); ++ trace_boot_call(&trace_call, fn); + enable_boot_trace(); + } + +- ret.result = fn(); ++ trace_ret.result = fn(); + + if (initcall_debug) { + disable_boot_trace(); + 
rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); +- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10; +- trace_boot_ret(&ret, fn); ++ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10; ++ trace_boot_ret(&trace_ret, fn); + printk("initcall %pF returned %d after %Ld usecs\n", fn, +- ret.result, ret.duration); ++ trace_ret.result, trace_ret.duration); + } + + msgbuf[0] = 0; + +- if (ret.result && ret.result != -ENODEV && initcall_debug) +- sprintf(msgbuf, "error code %d ", ret.result); ++ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug) ++ sprintf(msgbuf, "error code %d ", trace_ret.result); + + if (preempt_count() != count) { +- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf)); ++ msg1 = " preemption imbalance"; + preempt_count() = count; + } + if (irqs_disabled()) { +- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); ++ msg2 = " disabled interrupts"; + local_irq_enable(); + } +- if (msgbuf[0]) { +- printk("initcall %pF returned with %s\n", fn, msgbuf); ++ if (msgbuf[0] || *msg1 || *msg2) { ++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2); + } + +- return ret.result; ++ return trace_ret.result; + } + + +@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused) + if (!ramdisk_execute_command) + ramdisk_execute_command = "/init"; + +- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) { ++ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) { + ramdisk_execute_command = NULL; + prepare_namespace(); + } + ++ grsecurity_init(); ++ + /* + * Ok, we have completed the initial bootup, and + * we're essentially up and running. Get rid of the +diff --git a/init/noinitramfs.c b/init/noinitramfs.c +index f4c1a3a..96c19bd 100644 +--- a/init/noinitramfs.c ++++ b/init/noinitramfs.c +@@ -29,7 +29,7 @@ static int __init default_rootfs(void) + { + int err; + +- err = sys_mkdir("/dev", 0755); ++ err = sys_mkdir((const char __user *)"/dev", 0755); + if (err < 0) + goto out; + +@@ -39,7 +39,7 @@ static int __init default_rootfs(void) + if (err < 0) + goto out; + +- err = sys_mkdir("/root", 0700); ++ err = sys_mkdir((const char __user *)"/root", 0700); + if (err < 0) + goto out; + +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index d01bc14..8df81db 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, + mq_bytes = (mq_msg_tblsz + + (info->attr.mq_maxmsg * info->attr.mq_msgsize)); + ++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1); + spin_lock(&mq_lock); + if (u->mq_bytes + mq_bytes < u->mq_bytes || + u->mq_bytes + mq_bytes > +diff --git a/ipc/msg.c b/ipc/msg.c +index 779f762..4af9e36 100644 +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) + return security_msg_queue_associate(msq, msgflg); + } + ++static struct ipc_ops msg_ops = { ++ .getnew = newque, ++ .associate = msg_security, ++ .more_checks = NULL ++}; ++ + SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) + { + struct ipc_namespace *ns; +- struct ipc_ops msg_ops; + struct ipc_params msg_params; + + ns = current->nsproxy->ipc_ns; + +- msg_ops.getnew = newque; +- msg_ops.associate = msg_security; +- msg_ops.more_checks = NULL; +- + msg_params.key = key; + msg_params.flg = msgflg; + +diff --git a/ipc/sem.c b/ipc/sem.c +index b781007..f738b04 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -309,10 +309,15 @@ static inline int 
sem_more_checks(struct kern_ipc_perm *ipcp, + return 0; + } + ++static struct ipc_ops sem_ops = { ++ .getnew = newary, ++ .associate = sem_security, ++ .more_checks = sem_more_checks ++}; ++ + SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) + { + struct ipc_namespace *ns; +- struct ipc_ops sem_ops; + struct ipc_params sem_params; + + ns = current->nsproxy->ipc_ns; +@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) + if (nsems < 0 || nsems > ns->sc_semmsl) + return -EINVAL; + +- sem_ops.getnew = newary; +- sem_ops.associate = sem_security; +- sem_ops.more_checks = sem_more_checks; +- + sem_params.key = key; + sem_params.flg = semflg; + sem_params.u.nsems = nsems; +@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, + ushort* sem_io = fast_sem_io; + int nsems; + ++ pax_track_stack(); ++ + sma = sem_lock_check(ns, semid); + if (IS_ERR(sma)) + return PTR_ERR(sma); +@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, + unsigned long jiffies_left = 0; + struct ipc_namespace *ns; + ++ pax_track_stack(); ++ + ns = current->nsproxy->ipc_ns; + + if (nsops < 1 || semid < 0) +diff --git a/ipc/shm.c b/ipc/shm.c +index d30732c..e4992cd 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); + static int sysvipc_shm_proc_show(struct seq_file *s, void *it); + #endif + ++#ifdef CONFIG_GRKERNSEC ++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const uid_t cuid, ++ const int shmid); ++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime); ++#endif ++ + void shm_init_ns(struct ipc_namespace *ns) + { + ns->shm_ctlmax = SHMMAX; +@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) + shp->shm_lprid = 0; + shp->shm_atim = shp->shm_dtim = 0; + shp->shm_ctim = get_seconds(); ++#ifdef CONFIG_GRKERNSEC ++ { ++ struct timespec timeval; ++ do_posix_clock_monotonic_gettime(&timeval); ++ ++ shp->shm_createtime = timeval.tv_sec; ++ } ++#endif + shp->shm_segsz = size; + shp->shm_nattch = 0; + shp->shm_file = file; +@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, + return 0; + } + ++static struct ipc_ops shm_ops = { ++ .getnew = newseg, ++ .associate = shm_security, ++ .more_checks = shm_more_checks ++}; ++ + SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) + { + struct ipc_namespace *ns; +- struct ipc_ops shm_ops; + struct ipc_params shm_params; + + ns = current->nsproxy->ipc_ns; + +- shm_ops.getnew = newseg; +- shm_ops.associate = shm_security; +- shm_ops.more_checks = shm_more_checks; +- + shm_params.key = key; + shm_params.flg = shmflg; + shm_params.u.size = size; +@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) + f_mode = FMODE_READ | FMODE_WRITE; + } + if (shmflg & SHM_EXEC) { ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->pax_flags & MF_PAX_MPROTECT) ++ goto out; ++#endif ++ + prot |= PROT_EXEC; + acc_mode |= S_IXUGO; + } +@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) + if (err) + goto out_unlock; + ++#ifdef CONFIG_GRKERNSEC ++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime, ++ shp->shm_perm.cuid, shmid) || ++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) { ++ err = 
-EACCES; ++ goto out_unlock; ++ } ++#endif ++ + path.dentry = dget(shp->shm_file->f_path.dentry); + path.mnt = shp->shm_file->f_path.mnt; + shp->shm_nattch++; ++#ifdef CONFIG_GRKERNSEC ++ shp->shm_lapid = current->pid; ++#endif + size = i_size_read(path.dentry->d_inode); + shm_unlock(shp); + +diff --git a/kernel/acct.c b/kernel/acct.c +index a6605ca..ca91111 100644 +--- a/kernel/acct.c ++++ b/kernel/acct.c +@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, + */ + flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; +- file->f_op->write(file, (char *)&ac, ++ file->f_op->write(file, (char __force_user *)&ac, + sizeof(acct_t), &file->f_pos); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; + set_fs(fs); +diff --git a/kernel/audit.c b/kernel/audit.c +index 5feed23..48415fd 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0; + 3) suppressed due to audit_rate_limit + 4) suppressed due to audit_backlog_limit + */ +-static atomic_t audit_lost = ATOMIC_INIT(0); ++static atomic_unchecked_t audit_lost = ATOMIC_INIT(0); + + /* The netlink socket. */ + static struct sock *audit_sock; +@@ -232,7 +232,7 @@ void audit_log_lost(const char *message) + unsigned long now; + int print; + +- atomic_inc(&audit_lost); ++ atomic_inc_unchecked(&audit_lost); + + print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); + +@@ -251,7 +251,7 @@ void audit_log_lost(const char *message) + printk(KERN_WARNING + "audit: audit_lost=%d audit_rate_limit=%d " + "audit_backlog_limit=%d\n", +- atomic_read(&audit_lost), ++ atomic_read_unchecked(&audit_lost), + audit_rate_limit, + audit_backlog_limit); + audit_panic(message); +@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + status_set.pid = audit_pid; + status_set.rate_limit = audit_rate_limit; + status_set.backlog_limit = audit_backlog_limit; +- status_set.lost = atomic_read(&audit_lost); ++ status_set.lost = atomic_read_unchecked(&audit_lost); + status_set.backlog = skb_queue_len(&audit_skb_queue); + audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, + &status_set, sizeof(status_set)); +@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + spin_unlock_irq(&tsk->sighand->siglock); + } + read_unlock(&tasklist_lock); +- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0, +- &s, sizeof(s)); ++ ++ if (!err) ++ audit_send_reply(NETLINK_CB(skb).pid, seq, ++ AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); + break; + } + case AUDIT_TTY_SET: { +@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, + avail = audit_expand(ab, + max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); + if (!avail) +- goto out; ++ goto out_va_end; + len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2); + } +- va_end(args2); + if (len > 0) + skb_put(skb, len); ++out_va_end: ++ va_end(args2); + out: + return; + } +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index 267e484..ac41bc3 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context, + struct audit_buffer **ab, + struct audit_aux_data_execve *axi) + { +- int i; +- size_t len, len_sent = 0; ++ int i, len; ++ size_t len_sent = 0; + const char __user *p; + char *buf; + +@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx, + } + + /* global counter which is incremented every 
time something logs in */ +-static atomic_t session_id = ATOMIC_INIT(0); ++static atomic_unchecked_t session_id = ATOMIC_INIT(0); + + /** + * audit_set_loginuid - set a task's audit_context loginuid +@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0); + */ + int audit_set_loginuid(struct task_struct *task, uid_t loginuid) + { +- unsigned int sessionid = atomic_inc_return(&session_id); ++ unsigned int sessionid = atomic_inc_return_unchecked(&session_id); + struct audit_context *context = task->audit_context; + + if (context && context->in_syscall) { +diff --git a/kernel/capability.c b/kernel/capability.c +index 8a944f5..db5001e 100644 +--- a/kernel/capability.c ++++ b/kernel/capability.c +@@ -305,10 +305,26 @@ int capable(int cap) + BUG(); + } + +- if (security_capable(cap) == 0) { ++ if (security_capable(cap) == 0 && gr_is_capable(cap)) { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; + } ++ ++int capable_nolog(int cap) ++{ ++ if (unlikely(!cap_valid(cap))) { ++ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); ++ BUG(); ++ } ++ ++ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) { ++ current->flags |= PF_SUPERPRIV; ++ return 1; ++ } ++ return 0; ++} ++ + EXPORT_SYMBOL(capable); ++EXPORT_SYMBOL(capable_nolog); +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index 1fbcc74..7000012 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -536,6 +536,8 @@ static struct css_set *find_css_set( + struct hlist_head *hhead; + struct cg_cgroup_link *link; + ++ pax_track_stack(); ++ + /* First see if we already have a cgroup group that matches + * the desired set */ + read_lock(&css_set_lock); +diff --git a/kernel/compat.c b/kernel/compat.c +index 8bc5578..186e44a 100644 +--- a/kernel/compat.c ++++ b/kernel/compat.c +@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) + mm_segment_t oldfs; + long ret; + +- restart->nanosleep.rmtp = (struct timespec __user *) &rmt; ++ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt; + oldfs = get_fs(); + set_fs(KERNEL_DS); + ret = hrtimer_nanosleep_restart(restart); +@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, + oldfs = get_fs(); + set_fs(KERNEL_DS); + ret = hrtimer_nanosleep(&tu, +- rmtp ? (struct timespec __user *)&rmt : NULL, ++ rmtp ? (struct timespec __force_user *)&rmt : NULL, + HRTIMER_MODE_REL, CLOCK_MONOTONIC); + set_fs(oldfs); + +@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_sigpending((old_sigset_t __user *) &s); ++ ret = sys_sigpending((old_sigset_t __force_user *) &s); + set_fs(old_fs); + if (ret == 0) + ret = put_user(s, set); +@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = sys_sigprocmask(how, +- set ? (old_sigset_t __user *) &s : NULL, +- oset ? (old_sigset_t __user *) &s : NULL); ++ set ? (old_sigset_t __force_user *) &s : NULL, ++ oset ? 
(old_sigset_t __force_user *) &s : NULL); + set_fs(old_fs); + if (ret == 0) + if (oset) +@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource, + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_old_getrlimit(resource, &r); ++ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r); + set_fs(old_fs); + + if (!ret) { +@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru) + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_getrusage(who, (struct rusage __user *) &r); ++ ret = sys_getrusage(who, (struct rusage __force_user *) &r); + set_fs(old_fs); + + if (ret) +@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, + set_fs (KERNEL_DS); + ret = sys_wait4(pid, + (stat_addr ? +- (unsigned int __user *) &status : NULL), +- options, (struct rusage __user *) &r); ++ (unsigned int __force_user *) &status : NULL), ++ options, (struct rusage __force_user *) &r); + set_fs (old_fs); + + if (ret > 0) { +@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, + memset(&info, 0, sizeof(info)); + + set_fs(KERNEL_DS); +- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options, +- uru ? (struct rusage __user *)&ru : NULL); ++ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options, ++ uru ? (struct rusage __force_user *)&ru : NULL); + set_fs(old_fs); + + if ((ret < 0) || (info.si_signo == 0)) +@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_timer_settime(timer_id, flags, +- (struct itimerspec __user *) &newts, +- (struct itimerspec __user *) &oldts); ++ (struct itimerspec __force_user *) &newts, ++ (struct itimerspec __force_user *) &oldts); + set_fs(oldfs); + if (!err && old && put_compat_itimerspec(old, &oldts)) + return -EFAULT; +@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_timer_gettime(timer_id, +- (struct itimerspec __user *) &ts); ++ (struct itimerspec __force_user *) &ts); + set_fs(oldfs); + if (!err && put_compat_itimerspec(setting, &ts)) + return -EFAULT; +@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_settime(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + return err; + } +@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_gettime(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + if (!err && put_compat_timespec(&ts, tp)) + return -EFAULT; +@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_getres(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + if (!err && tp && put_compat_timespec(&ts, tp)) + return -EFAULT; +@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) + long err; + mm_segment_t oldfs; + struct timespec tu; +- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; ++ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp; + +- restart->nanosleep.rmtp = (struct timespec __user *) &tu; ++ restart->nanosleep.rmtp = (struct timespec 
__force_user *) &tu; + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = clock_nanosleep_restart(restart); +@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_nanosleep(which_clock, flags, +- (struct timespec __user *) &in, +- (struct timespec __user *) &out); ++ (struct timespec __force_user *) &in, ++ (struct timespec __force_user *) &out); + set_fs(oldfs); + + if ((err == -ERESTART_RESTARTBLOCK) && rmtp && +diff --git a/kernel/configs.c b/kernel/configs.c +index abaee68..047facd 100644 +--- a/kernel/configs.c ++++ b/kernel/configs.c +@@ -73,8 +73,19 @@ static int __init ikconfig_init(void) + struct proc_dir_entry *entry; + + /* create the current config file */ ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL, ++ &ikconfig_file_ops); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL, ++ &ikconfig_file_ops); ++#endif ++#else + entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, + &ikconfig_file_ops); ++#endif ++ + if (!entry) + return -ENOMEM; + +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 3f2f04f..4e53ded 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -20,7 +20,7 @@ + /* Serializes the updates to cpu_online_mask, cpu_present_mask */ + static DEFINE_MUTEX(cpu_add_remove_lock); + +-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); ++static RAW_NOTIFIER_HEAD(cpu_chain); + + /* If set, cpu_up and cpu_down will return -EBUSY and do nothing. + * Should always be manipulated under cpu_add_remove_lock +diff --git a/kernel/cred.c b/kernel/cred.c +index 0b5b5fc..f20c6b9 100644 +--- a/kernel/cred.c ++++ b/kernel/cred.c +@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu) + */ + void __put_cred(struct cred *cred) + { ++ pax_track_stack(); ++ + kdebug("__put_cred(%p{%d,%d})", cred, + atomic_read(&cred->usage), + read_cred_subscribers(cred)); +@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk) + { + struct cred *cred; + ++ pax_track_stack(); ++ + kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred, + atomic_read(&tsk->cred->usage), + read_cred_subscribers(tsk->cred)); +@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk) + validate_creds(cred); + put_cred(cred); + } ++ ++#ifdef CONFIG_GRKERNSEC_SETXID ++ cred = (struct cred *) tsk->delayed_cred; ++ if (cred) { ++ tsk->delayed_cred = NULL; ++ validate_creds(cred); ++ put_cred(cred); ++ } ++#endif + } + + /** +@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task) + { + const struct cred *cred; + ++ pax_track_stack(); ++ + rcu_read_lock(); + + do { +@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void) + { + struct cred *new; + ++ pax_track_stack(); ++ + new = kmem_cache_zalloc(cred_jar, GFP_KERNEL); + if (!new) + return NULL; +@@ -289,6 +306,8 @@ struct cred *prepare_creds(void) + const struct cred *old; + struct cred *new; + ++ pax_track_stack(); ++ + validate_process_creds(); + + new = kmem_cache_alloc(cred_jar, GFP_KERNEL); +@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void) + struct thread_group_cred *tgcred = NULL; + struct cred *new; + ++ pax_track_stack(); ++ + #ifdef CONFIG_KEYS + tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); + if (!tgcred) +@@ -441,8 +462,12 @@ int copy_creds(struct task_struct *p, unsigned long 
clone_flags) + struct cred *new; + int ret; + ++ pax_track_stack(); ++ + mutex_init(&p->cred_guard_mutex); + ++ p->replacement_session_keyring = NULL; ++ + if ( + #ifdef CONFIG_KEYS + !p->cred->thread_keyring && +@@ -523,11 +548,13 @@ error_put: + * Always returns 0 thus allowing this function to be tail-called at the end + * of, say, sys_setgid(). + */ +-int commit_creds(struct cred *new) ++static int __commit_creds(struct cred *new) + { + struct task_struct *task = current; + const struct cred *old = task->real_cred; + ++ pax_track_stack(); ++ + kdebug("commit_creds(%p{%d,%d})", new, + atomic_read(&new->usage), + read_cred_subscribers(new)); +@@ -544,6 +571,8 @@ int commit_creds(struct cred *new) + + get_cred(new); /* we will require a ref for the subj creds too */ + ++ gr_set_role_label(task, new->uid, new->gid); ++ + /* dumpability changes */ + if (old->euid != new->euid || + old->egid != new->egid || +@@ -563,10 +592,8 @@ int commit_creds(struct cred *new) + key_fsgid_changed(task); + + /* do it +- * - What if a process setreuid()'s and this brings the +- * new uid over his NPROC rlimit? We can check this now +- * cheaply with the new uid cache, so if it matters +- * we should be checking for it. -DaveM ++ * RLIMIT_NPROC limits on user->processes have already been checked ++ * in set_user(). + */ + alter_cred_subscribers(new, 2); + if (new->user != old->user) +@@ -595,8 +622,105 @@ int commit_creds(struct cred *new) + put_cred(old); + return 0; + } ++ ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern int set_user(struct cred *new); ++ ++void gr_delayed_cred_worker(void) ++{ ++ const struct cred *new = current->delayed_cred; ++ struct cred *ncred; ++ ++ current->delayed_cred = NULL; ++ ++ if (current_uid() && new != NULL) { ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ return; ++ } else if (new == NULL) ++ return; ++ ++ ncred = prepare_creds(); ++ if (!ncred) ++ goto die; ++ // uids ++ ncred->uid = new->uid; ++ ncred->euid = new->euid; ++ ncred->suid = new->suid; ++ ncred->fsuid = new->fsuid; ++ // gids ++ ncred->gid = new->gid; ++ ncred->egid = new->egid; ++ ncred->sgid = new->sgid; ++ ncred->fsgid = new->fsgid; ++ // groups ++ if (set_groups(ncred, new->group_info) < 0) { ++ abort_creds(ncred); ++ goto die; ++ } ++ // caps ++ ncred->securebits = new->securebits; ++ ncred->cap_inheritable = new->cap_inheritable; ++ ncred->cap_permitted = new->cap_permitted; ++ ncred->cap_effective = new->cap_effective; ++ ncred->cap_bset = new->cap_bset; ++ ++ if (set_user(ncred)) { ++ abort_creds(ncred); ++ goto die; ++ } ++ ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ ++ __commit_creds(ncred); ++ return; ++die: ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ do_group_exit(SIGKILL); ++} ++#endif ++ ++int commit_creds(struct cred *new) ++{ ++#ifdef CONFIG_GRKERNSEC_SETXID ++ int ret; ++ int schedule_it = 0; ++ struct task_struct *t; ++ ++ /* we won't get called with tasklist_lock held for writing ++ and interrupts disabled as the cred struct in that case is ++ init_cred ++ */ ++ if (grsec_enable_setxid && !current_is_single_threaded() && ++ !current_uid() && new->uid) { ++ schedule_it = 1; ++ } ++ ret = __commit_creds(new); ++ if (schedule_it) { ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ for (t = next_thread(current); t != current; ++ t = next_thread(t)) { ++ if (t->delayed_cred == NULL) { ++ t->delayed_cred = get_cred(new); ++ set_tsk_thread_flag(t, TIF_GRSEC_SETXID); ++ set_tsk_need_resched(t); ++ } ++ } ++ 
read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ } ++ return ret; ++#else ++ return __commit_creds(new); ++#endif ++} ++ + EXPORT_SYMBOL(commit_creds); + ++ + /** + * abort_creds - Discard a set of credentials and unlock the current task + * @new: The credentials that were going to be applied +@@ -606,6 +730,8 @@ EXPORT_SYMBOL(commit_creds); + */ + void abort_creds(struct cred *new) + { ++ pax_track_stack(); ++ + kdebug("abort_creds(%p{%d,%d})", new, + atomic_read(&new->usage), + read_cred_subscribers(new)); +@@ -629,6 +755,8 @@ const struct cred *override_creds(const struct cred *new) + { + const struct cred *old = current->cred; + ++ pax_track_stack(); ++ + kdebug("override_creds(%p{%d,%d})", new, + atomic_read(&new->usage), + read_cred_subscribers(new)); +@@ -658,6 +786,8 @@ void revert_creds(const struct cred *old) + { + const struct cred *override = current->cred; + ++ pax_track_stack(); ++ + kdebug("revert_creds(%p{%d,%d})", old, + atomic_read(&old->usage), + read_cred_subscribers(old)); +@@ -704,6 +834,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) + const struct cred *old; + struct cred *new; + ++ pax_track_stack(); ++ + new = kmem_cache_alloc(cred_jar, GFP_KERNEL); + if (!new) + return NULL; +@@ -758,6 +890,8 @@ EXPORT_SYMBOL(prepare_kernel_cred); + */ + int set_security_override(struct cred *new, u32 secid) + { ++ pax_track_stack(); ++ + return security_kernel_act_as(new, secid); + } + EXPORT_SYMBOL(set_security_override); +@@ -777,6 +911,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx) + u32 secid; + int ret; + ++ pax_track_stack(); ++ + ret = security_secctx_to_secid(secctx, strlen(secctx), &secid); + if (ret < 0) + return ret; +diff --git a/kernel/exit.c b/kernel/exit.c +index 0f8fae3..66af9b1 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -55,6 +55,10 @@ + #include <asm/pgtable.h> + #include <asm/mmu_context.h> + ++#ifdef CONFIG_GRKERNSEC ++extern rwlock_t grsec_exec_file_lock; ++#endif ++ + static void exit_mm(struct task_struct * tsk); + + static void __unhash_process(struct task_struct *p) +@@ -174,6 +178,10 @@ void release_task(struct task_struct * p) + struct task_struct *leader; + int zap_leader; + repeat: ++#ifdef CONFIG_NET ++ gr_del_task_from_ip_table(p); ++#endif ++ + tracehook_prepare_release_task(p); + /* don't need to get the RCU readlock here - the process is dead and + * can't be modifying its own credentials */ +@@ -397,7 +405,7 @@ int allow_signal(int sig) + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ +- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; ++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + return 0; +@@ -433,6 +441,17 @@ void daemonize(const char *name, ...) + vsnprintf(current->comm, sizeof(current->comm), name, args); + va_end(args); + ++#ifdef CONFIG_GRKERNSEC ++ write_lock(&grsec_exec_file_lock); ++ if (current->exec_file) { ++ fput(current->exec_file); ++ current->exec_file = NULL; ++ } ++ write_unlock(&grsec_exec_file_lock); ++#endif ++ ++ gr_set_kernel_label(current); ++ + /* + * If we were started as result of loading a module, close all of the + * user space pages. 
We don't need them, and if we didn't close them +@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code) + struct task_struct *tsk = current; + int group_dead; + +- profile_task_exit(tsk); +- +- WARN_ON(atomic_read(&tsk->fs_excl)); +- ++ /* ++ * Check this first since set_fs() below depends on ++ * current_thread_info(), which we better not access when we're in ++ * interrupt context. Other than that, we want to do the set_fs() ++ * as early as possible. ++ */ + if (unlikely(in_interrupt())) + panic("Aiee, killing interrupt handler!"); +- if (unlikely(!tsk->pid)) +- panic("Attempted to kill the idle task!"); + + /* +- * If do_exit is called because this processes oopsed, it's possible ++ * If do_exit is called because this processes Oops'ed, it's possible + * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before + * continuing. Amongst other possible reasons, this is to prevent + * mm_release()->clear_child_tid() from writing to a user-controlled +@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code) + */ + set_fs(USER_DS); + ++ profile_task_exit(tsk); ++ ++ WARN_ON(atomic_read(&tsk->fs_excl)); ++ ++ if (unlikely(!tsk->pid)) ++ panic("Attempted to kill the idle task!"); ++ + tracehook_report_exit(&code); + + validate_creds_for_do_exit(tsk); +@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code) + tsk->exit_code = code; + taskstats_exit(tsk, group_dead); + ++ gr_acl_handle_psacct(tsk, code); ++ gr_acl_handle_exit(); ++ + exit_mm(tsk); + + if (group_dead) +@@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code) + tsk->flags |= PF_EXITPIDONE; + + if (tsk->io_context) +- exit_io_context(); ++ exit_io_context(tsk); + + if (tsk->splice_pipe) + __free_pipe_info(tsk->splice_pipe); +@@ -1059,7 +1088,7 @@ SYSCALL_DEFINE1(exit, int, error_code) + * Take down every thread in the group. This is called by fatal signals + * as well as by sys_exit_group (below). 
+ */ +-NORET_TYPE void ++__noreturn void + do_group_exit(int exit_code) + { + struct signal_struct *sig = current->signal; +@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) + + if (unlikely(wo->wo_flags & WNOWAIT)) { + int exit_code = p->exit_code; +- int why, status; ++ int why; + + get_task_struct(p); + read_unlock(&tasklist_lock); +diff --git a/kernel/fork.c b/kernel/fork.c +index 4bde56f..8976a8f 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) + *stackend = STACK_END_MAGIC; /* for overflow detection */ + + #ifdef CONFIG_CC_STACKPROTECTOR +- tsk->stack_canary = get_random_int(); ++ tsk->stack_canary = pax_get_random_long(); + #endif + + /* One for us, one for whoever does the "release_task()" (usually parent) */ +@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + mm->locked_vm = 0; + mm->mmap = NULL; + mm->mmap_cache = NULL; +- mm->free_area_cache = oldmm->mmap_base; +- mm->cached_hole_size = ~0UL; ++ mm->free_area_cache = oldmm->free_area_cache; ++ mm->cached_hole_size = oldmm->cached_hole_size; + mm->map_count = 0; + cpumask_clear(mm_cpumask(mm)); + mm->mm_rb = RB_ROOT; +@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + tmp->vm_flags &= ~VM_LOCKED; + tmp->vm_mm = mm; + tmp->vm_next = tmp->vm_prev = NULL; ++ tmp->vm_mirror = NULL; + anon_vma_link(tmp); + file = tmp->vm_file; + if (file) { +@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + if (retval) + goto out; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) { ++ struct vm_area_struct *mpnt_m; ++ ++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) { ++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm); ++ ++ if (!mpnt->vm_mirror) ++ continue; ++ ++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) { ++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt); ++ mpnt->vm_mirror = mpnt_m; ++ } else { ++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm); ++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror; ++ mpnt_m->vm_mirror->vm_mirror = mpnt_m; ++ mpnt->vm_mirror->vm_mirror = mpnt; ++ } ++ } ++ BUG_ON(mpnt_m); ++ } ++#endif ++ + /* a new mm has just been created */ + arch_dup_mmap(oldmm, mm); + retval = 0; +@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) + write_unlock(&fs->lock); + return -EAGAIN; + } +- fs->users++; ++ atomic_inc(&fs->users); + write_unlock(&fs->lock); + return 0; + } + tsk->fs = copy_fs_struct(fs); + if (!tsk->fs) + return -ENOMEM; ++ gr_set_chroot_entries(tsk, &tsk->fs->root); + return 0; + } + +@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags, + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); + #endif + retval = -EAGAIN; ++ ++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0); ++ + if (atomic_read(&p->real_cred->user->processes) >= + p->signal->rlim[RLIMIT_NPROC].rlim_cur) { +- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && +- p->real_cred->user != INIT_USER) ++ if (p->real_cred->user != INIT_USER && ++ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) + goto bad_fork_free; + } ++ current->flags &= ~PF_NPROC_EXCEEDED; + + retval = copy_creds(p, clone_flags); + if (retval < 0) +@@ -1183,6 +1214,8 @@ static 
struct task_struct *copy_process(unsigned long clone_flags, + goto bad_fork_free_pid; + } + ++ gr_copy_label(p); ++ + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; + /* + * Clear TID on mm_release()? +@@ -1299,7 +1332,8 @@ bad_fork_free_pid: + if (pid != &init_struct_pid) + free_pid(pid); + bad_fork_cleanup_io: +- put_io_context(p->io_context); ++ if (p->io_context) ++ exit_io_context(p); + bad_fork_cleanup_namespaces: + exit_task_namespaces(p); + bad_fork_cleanup_mm: +@@ -1333,6 +1367,8 @@ bad_fork_cleanup_count: + bad_fork_free: + free_task(p); + fork_out: ++ gr_log_forkfail(retval); ++ + return ERR_PTR(retval); + } + +@@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags, + if (clone_flags & CLONE_PARENT_SETTID) + put_user(nr, parent_tidptr); + ++ gr_handle_brute_check(); ++ + if (clone_flags & CLONE_VFORK) { + p->vfork_done = &vfork; + init_completion(&vfork); +@@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) + return 0; + + /* don't need lock here; in the worst case we'll do useless copy */ +- if (fs->users == 1) ++ if (atomic_read(&fs->users) == 1) + return 0; + + *new_fsp = copy_fs_struct(fs); +@@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) + fs = current->fs; + write_lock(&fs->lock); + current->fs = new_fs; +- if (--fs->users) ++ gr_set_chroot_entries(current, ¤t->fs->root); ++ if (atomic_dec_return(&fs->users)) + new_fs = NULL; + else + new_fs = fs; +diff --git a/kernel/futex.c b/kernel/futex.c +index fb98c9f..333faec 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -54,6 +54,7 @@ + #include <linux/mount.h> + #include <linux/pagemap.h> + #include <linux/syscalls.h> ++#include <linux/ptrace.h> + #include <linux/signal.h> + #include <linux/module.h> + #include <linux/magic.h> +@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) + struct page *page; + int err, ro = 0; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE) ++ return -EFAULT; ++#endif ++ + /* + * The futex address must be "naturally" aligned. + */ +@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared, + struct futex_q q; + int ret; + ++ pax_track_stack(); ++ + if (!bitset) + return -EINVAL; + +@@ -1871,7 +1879,7 @@ retry: + + restart = ¤t_thread_info()->restart_block; + restart->fn = futex_wait_restart; +- restart->futex.uaddr = (u32 *)uaddr; ++ restart->futex.uaddr = uaddr; + restart->futex.val = val; + restart->futex.time = abs_time->tv64; + restart->futex.bitset = bitset; +@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, + struct futex_q q; + int res, ret; + ++ pax_track_stack(); ++ + if (!bitset) + return -EINVAL; + +@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, + if (!p) + goto err_unlock; + ret = -EPERM; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ goto err_unlock; ++#endif + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid && +@@ -2489,7 +2503,7 @@ retry: + */ + static inline int fetch_robust_entry(struct robust_list __user **entry, + struct robust_list __user * __user *head, +- int *pi) ++ unsigned int *pi) + { + unsigned long uentry; + +@@ -2670,6 +2684,7 @@ static int __init futex_init(void) + { + u32 curval; + int i; ++ mm_segment_t oldfs; + + /* + * This will fail and we want it. 
Some arch implementations do +@@ -2681,7 +2696,10 @@ static int __init futex_init(void) + * implementation, the non functional ones will return + * -ENOSYS. + */ ++ oldfs = get_fs(); ++ set_fs(USER_DS); + curval = cmpxchg_futex_value_locked(NULL, 0, 0); ++ set_fs(oldfs); + if (curval == -EFAULT) + futex_cmpxchg_enabled = 1; + +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +index 2357165..eb25501 100644 +--- a/kernel/futex_compat.c ++++ b/kernel/futex_compat.c +@@ -10,6 +10,7 @@ + #include <linux/compat.h> + #include <linux/nsproxy.h> + #include <linux/futex.h> ++#include <linux/ptrace.h> + + #include <asm/uaccess.h> + +@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + { + struct compat_robust_list_head __user *head; + unsigned long ret; +- const struct cred *cred = current_cred(), *pcred; ++ const struct cred *cred = current_cred(); ++ const struct cred *pcred; + + if (!futex_cmpxchg_enabled) + return -ENOSYS; +@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + if (!p) + goto err_unlock; + ret = -EPERM; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ goto err_unlock; ++#endif + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid && +diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c +index 9b22d03..6295b62 100644 +--- a/kernel/gcov/base.c ++++ b/kernel/gcov/base.c +@@ -102,11 +102,6 @@ void gcov_enable_events(void) + } + + #ifdef CONFIG_MODULES +-static inline int within(void *addr, void *start, unsigned long size) +-{ +- return ((addr >= start) && (addr < start + size)); +-} +- + /* Update list and generate events when modules are unloaded. */ + static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + void *data) +@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + prev = NULL; + /* Remove entries located in module from linked list. 
*/ + for (info = gcov_info_head; info; info = info->next) { +- if (within(info, mod->module_core, mod->core_size)) { ++ if (within_module_core_rw((unsigned long)info, mod)) { + if (prev) + prev->next = info->next; + else +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c +index a6e9d00..a0da4f9 100644 +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void) + local_irq_restore(flags); + } + +-static void run_hrtimer_softirq(struct softirq_action *h) ++static void run_hrtimer_softirq(void) + { + hrtimer_peek_ahead_timers(); + } +diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c +index 8b6b8b6..6bc87df 100644 +--- a/kernel/kallsyms.c ++++ b/kernel/kallsyms.c +@@ -11,6 +11,9 @@ + * Changed the compression method from stem compression to "table lookup" + * compression (see scripts/kallsyms.c for a more complete description) + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kallsyms.h> + #include <linux/module.h> + #include <linux/init.h> +@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak)); + + static inline int is_kernel_inittext(unsigned long addr) + { ++ if (system_state != SYSTEM_BOOTING) ++ return 0; ++ + if (addr >= (unsigned long)_sinittext + && addr <= (unsigned long)_einittext) + return 1; + return 0; + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#ifdef CONFIG_MODULES ++static inline int is_module_text(unsigned long addr) ++{ ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END) ++ return 1; ++ ++ addr = ktla_ktva(addr); ++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END; ++} ++#else ++static inline int is_module_text(unsigned long addr) ++{ ++ return 0; ++} ++#endif ++#endif ++ + static inline int is_kernel_text(unsigned long addr) + { + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || +@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr) + + static inline int is_kernel(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (is_kernel_text(addr) || is_kernel_inittext(addr)) ++ return 1; ++ ++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end) ++#else + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) ++#endif ++ + return 1; + return in_gate_area_no_task(addr); + } + + static int is_ksym_addr(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (is_module_text(addr)) ++ return 0; ++#endif ++ + if (all_var) + return is_kernel(addr); + +@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) + + static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) + { +- iter->name[0] = '\0'; + iter->nameoff = get_symbol_offset(new_pos); + iter->pos = new_pos; + } +@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p) + { + struct kallsym_iter *iter = m->private; + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ if (current_uid()) ++ return 0; ++#endif ++ + /* Some debugging symbols have no name. Ignore them. 
*/ + if (!iter->name[0]) + return 0; +@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file) + struct kallsym_iter *iter; + int ret; + +- iter = kmalloc(sizeof(*iter), GFP_KERNEL); ++ iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + reset_iter(iter, 0); +diff --git a/kernel/kexec.c b/kernel/kexec.c +index f336e21..9c1c20b 100644 +--- a/kernel/kexec.c ++++ b/kernel/kexec.c +@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, + unsigned long flags) + { + struct compat_kexec_segment in; +- struct kexec_segment out, __user *ksegments; ++ struct kexec_segment out; ++ struct kexec_segment __user *ksegments; + unsigned long i, result; + + /* Don't allow clients that don't understand the native +diff --git a/kernel/kgdb.c b/kernel/kgdb.c +index 53dae4b..9ba3743 100644 +--- a/kernel/kgdb.c ++++ b/kernel/kgdb.c +@@ -86,7 +86,7 @@ static int kgdb_io_module_registered; + /* Guard for recursive entry */ + static int exception_level; + +-static struct kgdb_io *kgdb_io_ops; ++static const struct kgdb_io *kgdb_io_ops; + static DEFINE_SPINLOCK(kgdb_registration_lock); + + /* kgdb console driver is loaded */ +@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1); + */ + static atomic_t passive_cpu_wait[NR_CPUS]; + static atomic_t cpu_in_kgdb[NR_CPUS]; +-atomic_t kgdb_setting_breakpoint; ++atomic_unchecked_t kgdb_setting_breakpoint; + + struct task_struct *kgdb_usethread; + struct task_struct *kgdb_contthread; +@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES + + sizeof(unsigned long)]; + + /* to keep track of the CPU which is doing the single stepping*/ +-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); ++atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); + + /* + * If you are debugging a problem where roundup (the collection of +@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait) + return 0; + if (kgdb_connected) + return 1; +- if (atomic_read(&kgdb_setting_breakpoint)) ++ if (atomic_read_unchecked(&kgdb_setting_breakpoint)) + return 1; + if (print_wait) + printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); +@@ -1426,8 +1426,8 @@ acquirelock: + * instance of the exception handler wanted to come into the + * debugger on a different CPU via a single step + */ +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && +- atomic_read(&kgdb_cpu_doing_single_step) != cpu) { ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 && ++ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) { + + atomic_set(&kgdb_active, -1); + touch_softlockup_watchdog(); +@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void) + * + * Register it with the KGDB core. + */ +-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops) ++int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops) + { + int err; + +@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module); + * + * Unregister it with the KGDB core. 
+ */ +-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops) ++void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops) + { + BUG_ON(kgdb_connected); + +@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); + */ + void kgdb_breakpoint(void) + { +- atomic_set(&kgdb_setting_breakpoint, 1); ++ atomic_set_unchecked(&kgdb_setting_breakpoint, 1); + wmb(); /* Sync point before breakpoint */ + arch_kgdb_breakpoint(); + wmb(); /* Sync point after breakpoint */ +- atomic_set(&kgdb_setting_breakpoint, 0); ++ atomic_set_unchecked(&kgdb_setting_breakpoint, 0); + } + EXPORT_SYMBOL_GPL(kgdb_breakpoint); + +diff --git a/kernel/kmod.c b/kernel/kmod.c +index a061472..40884b6 100644 +--- a/kernel/kmod.c ++++ b/kernel/kmod.c +@@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe"; + * If module auto-loading support is disabled then this function + * becomes a no-operation. + */ +-int __request_module(bool wait, const char *fmt, ...) ++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap) + { +- va_list args; + char module_name[MODULE_NAME_LEN]; + unsigned int max_modprobes; + int ret; +- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL }; ++ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL }; + static char *envp[] = { "HOME=/", + "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", +@@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...) + if (ret) + return ret; + +- va_start(args, fmt); +- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); +- va_end(args); ++ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap); + if (ret >= MODULE_NAME_LEN) + return -ENAMETOOLONG; + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (!current_uid()) { ++ /* hack to workaround consolekit/udisks stupidity */ ++ read_lock(&tasklist_lock); ++ if (!strcmp(current->comm, "mount") && ++ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) { ++ read_unlock(&tasklist_lock); ++ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name); ++ return -EPERM; ++ } ++ read_unlock(&tasklist_lock); ++ } ++#endif ++ + /* If modprobe needs a service that is in a module, we get a recursive + * loop. Limit the number of running kmod threads to max_threads/2 or + * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method +@@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...) + atomic_dec(&kmod_concurrent); + return ret; + } ++ ++int ___request_module(bool wait, char *module_param, const char *fmt, ...) ++{ ++ va_list args; ++ int ret; ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, module_param, fmt, args); ++ va_end(args); ++ ++ return ret; ++} ++ ++int __request_module(bool wait, const char *fmt, ...) 
++{ ++ va_list args; ++ int ret; ++ ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (current_uid()) { ++ char module_param[MODULE_NAME_LEN]; ++ ++ memset(module_param, 0, sizeof(module_param)); ++ ++ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid()); ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, module_param, fmt, args); ++ va_end(args); ++ ++ return ret; ++ } ++#endif ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, NULL, fmt, args); ++ va_end(args); ++ ++ return ret; ++} ++ ++ + EXPORT_SYMBOL(__request_module); + #endif /* CONFIG_MODULES */ + +@@ -231,7 +284,7 @@ static int wait_for_helper(void *data) + * + * Thus the __user pointer cast is valid here. + */ +- sys_wait4(pid, (int __user *)&ret, 0, NULL); ++ sys_wait4(pid, (int __force_user *)&ret, 0, NULL); + + /* + * If ret is 0, either ____call_usermodehelper failed and the +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 176d825..77fa8ea 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void) + * kernel image and loaded module images reside. This is required + * so x86_64 can correctly handle the %rip-relative fixups. + */ +- kip->insns = module_alloc(PAGE_SIZE); ++ kip->insns = module_alloc_exec(PAGE_SIZE); + if (!kip->insns) { + kfree(kip); + return NULL; +@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) + */ + if (!list_is_singular(&kprobe_insn_pages)) { + list_del(&kip->list); +- module_free(NULL, kip->insns); ++ module_free_exec(NULL, kip->insns); + kfree(kip); + } + return 1; +@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void) + { + int i, err = 0; + unsigned long offset = 0, size = 0; +- char *modname, namebuf[128]; ++ char *modname, namebuf[KSYM_NAME_LEN]; + const char *symbol_name; + void *addr; + struct kprobe_blackpoint *kb; +@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) + const char *sym = NULL; + unsigned int i = *(loff_t *) v; + unsigned long offset = 0; +- char *modname, namebuf[128]; ++ char *modname, namebuf[KSYM_NAME_LEN]; + + head = &kprobe_table[i]; + preempt_disable(); +diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c +index 9cd2b1c..ab201ef 100644 +--- a/kernel/ksysfs.c ++++ b/kernel/ksysfs.c +@@ -45,6 +45,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj, + { + if (count+1 > UEVENT_HELPER_PATH_LEN) + return -ENOENT; ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; + memcpy(uevent_helper, buf, count); + uevent_helper[count] = '\0'; + if (count && uevent_helper[count-1] == '\n') +diff --git a/kernel/lockdep.c b/kernel/lockdep.c +index d86fe89..d12fc66 100644 +--- a/kernel/lockdep.c ++++ b/kernel/lockdep.c +@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = { + /* + * Various lockdep statistics: + */ +-atomic_t chain_lookup_hits; +-atomic_t chain_lookup_misses; +-atomic_t hardirqs_on_events; +-atomic_t hardirqs_off_events; +-atomic_t redundant_hardirqs_on; +-atomic_t redundant_hardirqs_off; +-atomic_t softirqs_on_events; +-atomic_t softirqs_off_events; +-atomic_t redundant_softirqs_on; +-atomic_t redundant_softirqs_off; +-atomic_t nr_unused_locks; +-atomic_t nr_cyclic_checks; +-atomic_t nr_find_usage_forwards_checks; +-atomic_t nr_find_usage_backwards_checks; ++atomic_unchecked_t chain_lookup_hits; ++atomic_unchecked_t chain_lookup_misses; ++atomic_unchecked_t hardirqs_on_events; ++atomic_unchecked_t hardirqs_off_events; ++atomic_unchecked_t 
redundant_hardirqs_on; ++atomic_unchecked_t redundant_hardirqs_off; ++atomic_unchecked_t softirqs_on_events; ++atomic_unchecked_t softirqs_off_events; ++atomic_unchecked_t redundant_softirqs_on; ++atomic_unchecked_t redundant_softirqs_off; ++atomic_unchecked_t nr_unused_locks; ++atomic_unchecked_t nr_cyclic_checks; ++atomic_unchecked_t nr_find_usage_forwards_checks; ++atomic_unchecked_t nr_find_usage_backwards_checks; + #endif + + /* +@@ -577,6 +577,10 @@ static int static_obj(void *obj) + int i; + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++ start = ktla_ktva(start); ++#endif ++ + /* + * static variable? + */ +@@ -592,8 +596,7 @@ static int static_obj(void *obj) + */ + for_each_possible_cpu(i) { + start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); +- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM +- + per_cpu_offset(i); ++ end = start + PERCPU_ENOUGH_ROOM; + + if ((addr >= start) && (addr < end)) + return 1; +@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) + if (!static_obj(lock->key)) { + debug_locks_off(); + printk("INFO: trying to register non-static key.\n"); ++ printk("lock:%pS key:%pS.\n", lock, lock->key); + printk("the code is fine but needs lockdep annotation.\n"); + printk("turning off the locking correctness validator.\n"); + dump_stack(); +@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, + if (!class) + return 0; + } +- debug_atomic_inc((atomic_t *)&class->ops); ++ debug_atomic_inc((atomic_unchecked_t *)&class->ops); + if (very_verbose(class)) { + printk("\nacquire class [%p] %s", class->key, class->name); + if (class->name_version > 1) +diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h +index a2ee95a..092f0f2 100644 +--- a/kernel/lockdep_internals.h ++++ b/kernel/lockdep_internals.h +@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class) + /* + * Various lockdep statistics: + */ +-extern atomic_t chain_lookup_hits; +-extern atomic_t chain_lookup_misses; +-extern atomic_t hardirqs_on_events; +-extern atomic_t hardirqs_off_events; +-extern atomic_t redundant_hardirqs_on; +-extern atomic_t redundant_hardirqs_off; +-extern atomic_t softirqs_on_events; +-extern atomic_t softirqs_off_events; +-extern atomic_t redundant_softirqs_on; +-extern atomic_t redundant_softirqs_off; +-extern atomic_t nr_unused_locks; +-extern atomic_t nr_cyclic_checks; +-extern atomic_t nr_cyclic_check_recursions; +-extern atomic_t nr_find_usage_forwards_checks; +-extern atomic_t nr_find_usage_forwards_recursions; +-extern atomic_t nr_find_usage_backwards_checks; +-extern atomic_t nr_find_usage_backwards_recursions; +-# define debug_atomic_inc(ptr) atomic_inc(ptr) +-# define debug_atomic_dec(ptr) atomic_dec(ptr) +-# define debug_atomic_read(ptr) atomic_read(ptr) ++extern atomic_unchecked_t chain_lookup_hits; ++extern atomic_unchecked_t chain_lookup_misses; ++extern atomic_unchecked_t hardirqs_on_events; ++extern atomic_unchecked_t hardirqs_off_events; ++extern atomic_unchecked_t redundant_hardirqs_on; ++extern atomic_unchecked_t redundant_hardirqs_off; ++extern atomic_unchecked_t softirqs_on_events; ++extern atomic_unchecked_t softirqs_off_events; ++extern atomic_unchecked_t redundant_softirqs_on; ++extern atomic_unchecked_t redundant_softirqs_off; ++extern atomic_unchecked_t nr_unused_locks; ++extern atomic_unchecked_t nr_cyclic_checks; ++extern atomic_unchecked_t nr_cyclic_check_recursions; ++extern atomic_unchecked_t nr_find_usage_forwards_checks; 
++extern atomic_unchecked_t nr_find_usage_forwards_recursions; ++extern atomic_unchecked_t nr_find_usage_backwards_checks; ++extern atomic_unchecked_t nr_find_usage_backwards_recursions; ++# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr) ++# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr) ++# define debug_atomic_read(ptr) atomic_read_unchecked(ptr) + #else + # define debug_atomic_inc(ptr) do { } while (0) + # define debug_atomic_dec(ptr) do { } while (0) +diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c +index d4aba4f..02a353f 100644 +--- a/kernel/lockdep_proc.c ++++ b/kernel/lockdep_proc.c +@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v) + + static void print_name(struct seq_file *m, struct lock_class *class) + { +- char str[128]; ++ char str[KSYM_NAME_LEN]; + const char *name = class->name; + + if (!name) { +diff --git a/kernel/module.c b/kernel/module.c +index 4b270e6..2efdb65 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -55,6 +55,7 @@ + #include <linux/async.h> + #include <linux/percpu.h> + #include <linux/kmemleak.h> ++#include <linux/grsecurity.h> + + #define CREATE_TRACE_POINTS + #include <trace/events/module.h> +@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq); + static BLOCKING_NOTIFIER_HEAD(module_notify_list); + + /* Bounds of module allocation, for speeding __module_address */ +-static unsigned long module_addr_min = -1UL, module_addr_max = 0; ++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0; ++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0; + + int register_module_notifier(struct notifier_block * nb) + { +@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, + return true; + + list_for_each_entry_rcu(mod, &modules, list) { +- struct symsearch arr[] = { ++ struct symsearch modarr[] = { + { mod->syms, mod->syms + mod->num_syms, mod->crcs, + NOT_GPL_ONLY, false }, + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, +@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, + #endif + }; + +- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) ++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data)) + return true; + } + return false; +@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align, + void *ptr; + int cpu; + +- if (align > PAGE_SIZE) { ++ if (align-1 >= PAGE_SIZE) { + printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", + name, align, PAGE_SIZE); + align = PAGE_SIZE; +@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs, + * /sys/module/foo/sections stuff + * J. 
Corbet <corbet@lwn.net> + */ +-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) ++#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + + static inline bool sect_empty(const Elf_Shdr *sect) + { +@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod) + destroy_params(mod->kp, mod->num_kp); + + /* This may be NULL, but that's OK */ +- module_free(mod, mod->module_init); ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); + kfree(mod->args); + if (mod->percpu) + percpu_modfree(mod->percpu); +@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod) + percpu_modfree(mod->refptr); + #endif + /* Free lock-classes: */ +- lockdep_free_key_range(mod->module_core, mod->core_size); ++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx); ++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw); + + /* Finally, free the core (containing the module structure) */ +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_core_rx); ++ module_free(mod, mod->module_core_rw); + + #ifdef CONFIG_MPU + update_protections(current->mm); +@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs, + unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); + int ret = 0; + const struct kernel_symbol *ksym; ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ int is_fs_load = 0; ++ int register_filesystem_found = 0; ++ char *p; ++ ++ p = strstr(mod->args, "grsec_modharden_fs"); ++ ++ if (p) { ++ char *endptr = p + strlen("grsec_modharden_fs"); ++ /* copy \0 as well */ ++ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1); ++ is_fs_load = 1; ++ } ++#endif ++ + + for (i = 1; i < n; i++) { ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ const char *name = strtab + sym[i].st_name; ++ ++ /* it's a real shame this will never get ripped and copied ++ upstream! ;( ++ */ ++ if (is_fs_load && !strcmp(name, "register_filesystem")) ++ register_filesystem_found = 1; ++#endif + switch (sym[i].st_shndx) { + case SHN_COMMON: + /* We compiled with -fno-common. These are not +@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs, + strtab + sym[i].st_name, mod); + /* Ok if resolved. 
*/ + if (ksym) { ++ pax_open_kernel(); + sym[i].st_value = ksym->value; ++ pax_close_kernel(); + break; + } + +@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs, + secbase = (unsigned long)mod->percpu; + else + secbase = sechdrs[sym[i].st_shndx].sh_addr; ++ pax_open_kernel(); + sym[i].st_value += secbase; ++ pax_close_kernel(); + break; + } + } + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (is_fs_load && !register_filesystem_found) { ++ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name); ++ ret = -EPERM; ++ } ++#endif ++ + return ret; + } + +@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod, + || s->sh_entsize != ~0UL + || strstarts(secstrings + s->sh_name, ".init")) + continue; +- s->sh_entsize = get_offset(mod, &mod->core_size, s, i); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i); + DEBUGP("\t%s\n", secstrings + s->sh_name); + } +- if (m == 0) +- mod->core_text_size = mod->core_size; + } + + DEBUGP("Init section allocation order:\n"); +@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod, + || s->sh_entsize != ~0UL + || !strstarts(secstrings + s->sh_name, ".init")) + continue; +- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) +- | INIT_OFFSET_MASK); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i); ++ s->sh_entsize |= INIT_OFFSET_MASK; + DEBUGP("\t%s\n", secstrings + s->sh_name); + } +- if (m == 0) +- mod->init_text_size = mod->init_size; + } + } + +@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value, + + /* As per nm */ + static char elf_type(const Elf_Sym *sym, +- Elf_Shdr *sechdrs, +- const char *secstrings, +- struct module *mod) ++ const Elf_Shdr *sechdrs, ++ const char *secstrings) + { + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { + if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) +@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod, + + /* Put symbol section at end of init part of module. */ + symsect->sh_flags |= SHF_ALLOC; +- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, ++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect, + symindex) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", secstrings + symsect->sh_name); + +@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod, + } + + /* Append room for core symbols at end of core part. */ +- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); +- mod->core_size = symoffs + ndst * sizeof(Elf_Sym); ++ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1); ++ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym); + + /* Put string table section at end of init part of module. */ + strsect->sh_flags |= SHF_ALLOC; +- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, ++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect, + strindex) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", secstrings + strsect->sh_name); + + /* Append room for core symbols' strings at end of core part. 
*/ +- *pstroffs = mod->core_size; ++ *pstroffs = mod->core_size_rx; + __set_bit(0, strmap); +- mod->core_size += bitmap_weight(strmap, strsect->sh_size); ++ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size); + + return symoffs; + } +@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod, + mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym); + mod->strtab = (void *)sechdrs[strindex].sh_addr; + ++ pax_open_kernel(); ++ + /* Set types up while we still have access to sections. */ + for (i = 0; i < mod->num_symtab; i++) + mod->symtab[i].st_info +- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod); ++ = elf_type(&mod->symtab[i], sechdrs, secstrings); + +- mod->core_symtab = dst = mod->module_core + symoffs; ++ mod->core_symtab = dst = mod->module_core_rx + symoffs; + src = mod->symtab; + *dst = *src; + for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { +@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod, + } + mod->core_num_syms = ndst; + +- mod->core_strtab = s = mod->module_core + stroffs; ++ mod->core_strtab = s = mod->module_core_rx + stroffs; + for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i) + if (test_bit(i, strmap)) + *++s = mod->strtab[i]; ++ ++ pax_close_kernel(); + } + #else + static inline unsigned long layout_symtab(struct module *mod, +@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num) + #endif + } + +-static void *module_alloc_update_bounds(unsigned long size) ++static void *module_alloc_update_bounds_rw(unsigned long size) + { + void *ret = module_alloc(size); + + if (ret) { + /* Update module bounds. */ +- if ((unsigned long)ret < module_addr_min) +- module_addr_min = (unsigned long)ret; +- if ((unsigned long)ret + size > module_addr_max) +- module_addr_max = (unsigned long)ret + size; ++ if ((unsigned long)ret < module_addr_min_rw) ++ module_addr_min_rw = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rw) ++ module_addr_max_rw = (unsigned long)ret + size; ++ } ++ return ret; ++} ++ ++static void *module_alloc_update_bounds_rx(unsigned long size) ++{ ++ void *ret = module_alloc_exec(size); ++ ++ if (ret) { ++ /* Update module bounds. 
*/ ++ if ((unsigned long)ret < module_addr_min_rx) ++ module_addr_min_rx = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rx) ++ module_addr_max_rx = (unsigned long)ret + size; + } + return ret; + } +@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, + unsigned int i; + + /* only scan the sections containing data */ +- kmemleak_scan_area(mod->module_core, (unsigned long)mod - +- (unsigned long)mod->module_core, ++ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod - ++ (unsigned long)mod->module_core_rw, + sizeof(struct module), GFP_KERNEL); + + for (i = 1; i < hdr->e_shnum; i++) { +@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, + && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0) + continue; + +- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr - +- (unsigned long)mod->module_core, ++ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr - ++ (unsigned long)mod->module_core_rw, + sechdrs[i].sh_size, GFP_KERNEL); + } + } +@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod, + Elf_Ehdr *hdr; + Elf_Shdr *sechdrs; + char *secstrings, *args, *modmagic, *strtab = NULL; +- char *staging; ++ char *staging, *license; + unsigned int i; + unsigned int symindex = 0; + unsigned int strindex = 0; +@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod, + goto free_hdr; + } + ++ license = get_modinfo(sechdrs, infoindex, "license"); ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ if (!license || !license_is_gpl_compatible(license)) { ++ err = -ENOEXEC; ++ goto free_hdr; ++ } ++#endif ++ + modmagic = get_modinfo(sechdrs, infoindex, "vermagic"); + /* This is allowed: modprobe --force will invalidate it. */ + if (!modmagic) { +@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod, + secstrings, &stroffs, strmap); + + /* Do the allocs. */ +- ptr = module_alloc_update_bounds(mod->core_size); ++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a +@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod, + err = -ENOMEM; + goto free_percpu; + } +- memset(ptr, 0, mod->core_size); +- mod->module_core = ptr; ++ memset(ptr, 0, mod->core_size_rw); ++ mod->module_core_rw = ptr; + +- ptr = module_alloc_update_bounds(mod->init_size); ++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. This block doesn't need to be + * scanned as it contains data and code that will be freed + * after the module is initialized. 
+ */ +- kmemleak_ignore(ptr); +- if (!ptr && mod->init_size) { ++ kmemleak_not_leak(ptr); ++ if (!ptr && mod->init_size_rw) { + err = -ENOMEM; +- goto free_core; ++ goto free_core_rw; + } +- memset(ptr, 0, mod->init_size); +- mod->module_init = ptr; ++ memset(ptr, 0, mod->init_size_rw); ++ mod->module_init_rw = ptr; ++ ++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx); ++ kmemleak_not_leak(ptr); ++ if (!ptr) { ++ err = -ENOMEM; ++ goto free_init_rw; ++ } ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->core_size_rx); ++ pax_close_kernel(); ++ mod->module_core_rx = ptr; ++ ++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx); ++ kmemleak_not_leak(ptr); ++ if (!ptr && mod->init_size_rx) { ++ err = -ENOMEM; ++ goto free_core_rx; ++ } ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->init_size_rx); ++ pax_close_kernel(); ++ mod->module_init_rx = ptr; + + /* Transfer each section which specifies SHF_ALLOC */ + DEBUGP("final section addresses:\n"); +@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod, + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + +- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) +- dest = mod->module_init +- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); +- else +- dest = mod->module_core + sechdrs[i].sh_entsize; ++ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) { ++ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC)) ++ dest = mod->module_init_rw ++ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); ++ else ++ dest = mod->module_init_rx ++ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); ++ } else { ++ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC)) ++ dest = mod->module_core_rw + sechdrs[i].sh_entsize; ++ else ++ dest = mod->module_core_rx + sechdrs[i].sh_entsize; ++ } + +- if (sechdrs[i].sh_type != SHT_NOBITS) +- memcpy(dest, (void *)sechdrs[i].sh_addr, +- sechdrs[i].sh_size); ++ if (sechdrs[i].sh_type != SHT_NOBITS) { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_64 ++ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR)) ++ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT); ++#endif ++ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) { ++ pax_open_kernel(); ++ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size); ++ pax_close_kernel(); ++ } else ++#endif ++ ++ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size); ++ } + /* Update sh_addr to point to copy in image. */ +- sechdrs[i].sh_addr = (unsigned long)dest; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (sechdrs[i].sh_flags & SHF_EXECINSTR) ++ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest); ++ else ++#endif ++ ++ sechdrs[i].sh_addr = (unsigned long)dest; + DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); + } + /* Module has been moved. */ +@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod, + mod->name); + if (!mod->refptr) { + err = -ENOMEM; +- goto free_init; ++ goto free_init_rx; + } + #endif + /* Now we've moved module, initialize linked lists, etc. */ +@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod, + goto free_unload; + + /* Set up license info based on the info section */ +- set_license(mod, get_modinfo(sechdrs, infoindex, "license")); ++ set_license(mod, license); + + /* + * ndiswrapper is under GPL by itself, but loads proprietary modules. 
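Taken together, the layout_sections() and load_module() hunks above implement a single routing rule: ELF sections that are writable (or not allocated at all) are laid out in a separate RW image (core_size_rw / init_size_rw), while text and rodata land in an RX image obtained from module_alloc_exec(). Below is a minimal stand-alone userspace sketch of just that rule; struct region and place_section() are invented names, and the alignment handling done by the real get_offset() is omitted.

#include <stdio.h>

/* Simplified stand-ins for the ELF section flags the patch tests. */
#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

/* Each module image is split into two totals instead of one. */
struct region { unsigned long size_rw, size_rx; };

/* Mirror of the patched rule: writable or non-allocated sections
 * accumulate in the RW total, everything else (text, rodata) in the
 * RX total. Returns the section's offset within its region. */
static unsigned long place_section(struct region *r, unsigned long flags,
                                   unsigned long size)
{
    unsigned long *total =
        ((flags & SHF_WRITE) || !(flags & SHF_ALLOC)) ? &r->size_rw
                                                      : &r->size_rx;
    unsigned long off = *total;
    *total += size;
    return off;
}

int main(void)
{
    struct region core = { 0, 0 };

    printf(".text   -> rx at %lu\n",
           place_section(&core, SHF_ALLOC | SHF_EXECINSTR, 4096));
    printf(".data   -> rw at %lu\n",
           place_section(&core, SHF_ALLOC | SHF_WRITE, 512));
    printf(".rodata -> rx at %lu\n",
           place_section(&core, SHF_ALLOC, 256));
    printf("core_size_rw=%lu core_size_rx=%lu\n",
           core.size_rw, core.size_rx);
    return 0;
}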
+@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod, + /* Set up MODINFO_ATTR fields */ + setup_modinfo(mod, sechdrs, infoindex); + ++ mod->args = args; ++ ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ { ++ char *p, *p2; ++ ++ if (strstr(mod->args, "grsec_modharden_netdev")) { ++ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name); ++ err = -EPERM; ++ goto cleanup; ++ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) { ++ p += strlen("grsec_modharden_normal"); ++ p2 = strstr(p, "_"); ++ if (p2) { ++ *p2 = '\0'; ++ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p); ++ *p2 = '_'; ++ } ++ err = -EPERM; ++ goto cleanup; ++ } ++ } ++#endif ++ ++ + /* Fix up syms, so that st_value is a pointer to location. */ + err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex, + mod); +@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod, + + /* Now do relocations. */ + for (i = 1; i < hdr->e_shnum; i++) { +- const char *strtab = (char *)sechdrs[strindex].sh_addr; + unsigned int info = sechdrs[i].sh_info; ++ strtab = (char *)sechdrs[strindex].sh_addr; + + /* Not a valid relocation section? */ + if (info >= hdr->e_shnum) +@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod, + * Do it before processing of module parameters, so the module + * can provide parameter accessor functions of its own. + */ +- if (mod->module_init) +- flush_icache_range((unsigned long)mod->module_init, +- (unsigned long)mod->module_init +- + mod->init_size); +- flush_icache_range((unsigned long)mod->module_core, +- (unsigned long)mod->module_core + mod->core_size); ++ if (mod->module_init_rx) ++ flush_icache_range((unsigned long)mod->module_init_rx, ++ (unsigned long)mod->module_init_rx ++ + mod->init_size_rx); ++ flush_icache_range((unsigned long)mod->module_core_rx, ++ (unsigned long)mod->module_core_rx + mod->core_size_rx); + + set_fs(old_fs); + +- mod->args = args; + if (section_addr(hdr, sechdrs, secstrings, "__obsparm")) + printk(KERN_WARNING "%s: Ignoring obsolete parameters\n", + mod->name); +@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod, + free_unload: + module_unload_free(mod); + #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) ++ free_init_rx: + percpu_modfree(mod->refptr); +- free_init: + #endif +- module_free(mod, mod->module_init); +- free_core: +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_init_rx); ++ free_core_rx: ++ module_free_exec(mod, mod->module_core_rx); ++ free_init_rw: ++ module_free(mod, mod->module_init_rw); ++ free_core_rw: ++ module_free(mod, mod->module_core_rw); + /* mod will be freed with core. Don't access it beyond this line! 
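The CONFIG_GRKERNSEC_MODHARDEN block above works by convention: the kernel's auto-load paths elsewhere in this patch append a marker such as grsec_modharden_normal<uid>_ to the module arguments, and load_module() refuses any image whose args carry one. A hedged userspace model of just the string handling follows; modharden_check() and its return codes are invented (the kernel returns -EPERM and logs via printk).

#include <stdio.h>
#include <string.h>

static int modharden_check(const char *name, char *args)
{
    char *p, *p2;

    if (strstr(args, "grsec_modharden_netdev")) {
        fprintf(stderr, "denied netdev auto-load of %.64s\n", name);
        return -1;
    }
    if ((p = strstr(args, "grsec_modharden_normal"))) {
        p += strlen("grsec_modharden_normal");
        p2 = strstr(p, "_");
        if (p2) {
            *p2 = '\0';   /* temporarily terminate the uid string */
            fprintf(stderr, "denied auto-load of %.64s by uid %.9s\n",
                    name, p);
            *p2 = '_';    /* restore the argument string */
        }
        return -1;
    }
    return 0; /* no marker: a normal, explicit module load */
}

int main(void)
{
    char args[] = "debug=1 grsec_modharden_normal1000_";

    return modharden_check("fat", args) ? 1 : 0;
}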
*/ + free_percpu: + if (percpu) +@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, + mod->symtab = mod->core_symtab; + mod->strtab = mod->core_strtab; + #endif +- module_free(mod, mod->module_init); +- mod->module_init = NULL; +- mod->init_size = 0; +- mod->init_text_size = 0; ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); ++ mod->module_init_rw = NULL; ++ mod->module_init_rx = NULL; ++ mod->init_size_rw = 0; ++ mod->init_size_rx = 0; + mutex_unlock(&module_mutex); + + return 0; +@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod, + unsigned long nextval; + + /* At worse, next value is at end of module */ +- if (within_module_init(addr, mod)) +- nextval = (unsigned long)mod->module_init+mod->init_text_size; ++ if (within_module_init_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx; ++ else if (within_module_init_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw; ++ else if (within_module_core_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx; ++ else if (within_module_core_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw; + else +- nextval = (unsigned long)mod->module_core+mod->core_text_size; ++ return NULL; + + /* Scan for closest preceeding symbol, and next symbol. (ELF + starts real symbols at 1). */ +@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p) + char buf[8]; + + seq_printf(m, "%s %u", +- mod->name, mod->init_size + mod->core_size); ++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw); + print_unload_info(m, mod); + + /* Informative for users. */ +@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p) + mod->state == MODULE_STATE_COMING ? "Loading": + "Live"); + /* Used by oprofile and other similar tools. 
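With the module image split four ways, the get_ksymbol() hunk above can no longer assume an address is "in init text, or else in core text": it must find the end of whichever region actually contains the address, and treat an address in none of them as an error. A rough userspace model of that lookup, with struct mod, within() and region_end() as invented names:

#include <stddef.h>

struct mod {
    char *init_rx, *init_rw, *core_rx, *core_rw;
    unsigned long init_size_rx, init_size_rw, core_size_rx, core_size_rw;
};

static int within(unsigned long addr, const char *base, unsigned long size)
{
    return base && addr >= (unsigned long)base &&
           addr < (unsigned long)base + size;
}

/* Mirror of the patched nextval logic: pick the end of the region
 * containing addr, or fail (the kernel returns NULL) if it is in none. */
static unsigned long region_end(const struct mod *m, unsigned long addr)
{
    if (within(addr, m->init_rx, m->init_size_rx))
        return (unsigned long)m->init_rx + m->init_size_rx;
    if (within(addr, m->init_rw, m->init_size_rw))
        return (unsigned long)m->init_rw + m->init_size_rw;
    if (within(addr, m->core_rx, m->core_size_rx))
        return (unsigned long)m->core_rx + m->core_size_rx;
    if (within(addr, m->core_rw, m->core_size_rw))
        return (unsigned long)m->core_rw + m->core_size_rw;
    return 0;
}

int main(void)
{
    static char core_rx[4096], core_rw[1024];
    struct mod m = { .core_rx = core_rx, .core_size_rx = sizeof core_rx,
                     .core_rw = core_rw, .core_size_rw = sizeof core_rw };

    return region_end(&m, (unsigned long)core_rx + 100) ? 0 : 1;
}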
*/ +- seq_printf(m, " 0x%p", mod->module_core); ++ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw); + + /* Taints info */ + if (mod->taints) +@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = { + + static int __init proc_modules_init(void) + { ++#ifndef CONFIG_GRKERNSEC_HIDESYM ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations); ++#else + proc_create("modules", 0, NULL, &proc_modules_operations); ++#endif ++#else ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#endif + return 0; + } + module_init(proc_modules_init); +@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr) + { + struct module *mod; + +- if (addr < module_addr_min || addr > module_addr_max) ++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) && ++ (addr < module_addr_min_rw || addr > module_addr_max_rw)) + return NULL; + + list_for_each_entry_rcu(mod, &modules, list) +- if (within_module_core(addr, mod) +- || within_module_init(addr, mod)) ++ if (within_module_init(addr, mod) || within_module_core(addr, mod)) + return mod; + return NULL; + } +@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr) + */ + struct module *__module_text_address(unsigned long addr) + { +- struct module *mod = __module_address(addr); ++ struct module *mod; ++ ++#ifdef CONFIG_X86_32 ++ addr = ktla_ktva(addr); ++#endif ++ ++ if (addr < module_addr_min_rx || addr > module_addr_max_rx) ++ return NULL; ++ ++ mod = __module_address(addr); ++ + if (mod) { + /* Make sure it's within the text section. */ +- if (!within(addr, mod->module_init, mod->init_text_size) +- && !within(addr, mod->module_core, mod->core_text_size)) ++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod)) + mod = NULL; + } + return mod; +diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c +index ec815a9..fe46e99 100644 +--- a/kernel/mutex-debug.c ++++ b/kernel/mutex-debug.c +@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) + } + + void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti) ++ struct task_struct *task) + { + SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); + + /* Mark the current thread as blocked on the lock: */ +- ti->task->blocked_on = waiter; ++ task->blocked_on = waiter; + } + + void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti) ++ struct task_struct *task) + { + DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); +- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); +- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); +- ti->task->blocked_on = NULL; ++ DEBUG_LOCKS_WARN_ON(waiter->task != task); ++ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter); ++ task->blocked_on = NULL; + + list_del_init(&waiter->list); + waiter->task = NULL; +@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock) + return; + + DEBUG_LOCKS_WARN_ON(lock->magic != lock); +- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); ++ DEBUG_LOCKS_WARN_ON(lock->owner != current); + DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); + mutex_clear_owner(lock); + } +diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h +index 6b2d735..372d3c4 100644 +--- a/kernel/mutex-debug.h ++++ b/kernel/mutex-debug.h +@@ -20,16 
+20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock, + extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); + extern void debug_mutex_add_waiter(struct mutex *lock, + struct mutex_waiter *waiter, +- struct thread_info *ti); ++ struct task_struct *task); + extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti); ++ struct task_struct *task); + extern void debug_mutex_unlock(struct mutex *lock); + extern void debug_mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); + + static inline void mutex_set_owner(struct mutex *lock) + { +- lock->owner = current_thread_info(); ++ lock->owner = current; + } + + static inline void mutex_clear_owner(struct mutex *lock) +diff --git a/kernel/mutex.c b/kernel/mutex.c +index f85644c..5ee9f77 100644 +--- a/kernel/mutex.c ++++ b/kernel/mutex.c +@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, + */ + + for (;;) { +- struct thread_info *owner; ++ struct task_struct *owner; + + /* + * If we own the BKL, then don't spin. The owner of +@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, + spin_lock_mutex(&lock->wait_lock, flags); + + debug_mutex_lock_common(lock, &waiter); +- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); ++ debug_mutex_add_waiter(lock, &waiter, task); + + /* add waiting tasks to the end of the waitqueue (FIFO): */ + list_add_tail(&waiter.list, &lock->wait_list); +@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, + * TASK_UNINTERRUPTIBLE case.) + */ + if (unlikely(signal_pending_state(state, task))) { +- mutex_remove_waiter(lock, &waiter, +- task_thread_info(task)); ++ mutex_remove_waiter(lock, &waiter, task); + mutex_release(&lock->dep_map, 1, ip); + spin_unlock_mutex(&lock->wait_lock, flags); + +@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, + done: + lock_acquired(&lock->dep_map, ip); + /* got the lock - rejoice! */ +- mutex_remove_waiter(lock, &waiter, current_thread_info()); ++ mutex_remove_waiter(lock, &waiter, task); + mutex_set_owner(lock); + + /* set it to 0 if there are no waiters left: */ +diff --git a/kernel/mutex.h b/kernel/mutex.h +index 67578ca..4115fbf 100644 +--- a/kernel/mutex.h ++++ b/kernel/mutex.h +@@ -19,7 +19,7 @@ + #ifdef CONFIG_SMP + static inline void mutex_set_owner(struct mutex *lock) + { +- lock->owner = current_thread_info(); ++ lock->owner = current; + } + + static inline void mutex_clear_owner(struct mutex *lock) +diff --git a/kernel/panic.c b/kernel/panic.c +index 96b45d0..7677a03 100644 +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...) 
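The kernel/mutex-debug.c and kernel/mutex.h hunks above all make the same substitution: the recorded lock owner becomes a struct task_struct * instead of a struct thread_info *, dropping one level of indirection in the debug checks. A minimal userspace mock of the add/remove bookkeeping; both structures are reduced to the fields the checks touch, and unlike the real kernel this sketch assigns waiter->task up front.

#include <assert.h>
#include <stddef.h>

struct task_struct;
struct mutex_waiter { struct task_struct *task; };
struct task_struct { struct mutex_waiter *blocked_on; };

/* Mark the task as blocked on the lock, keyed on the task itself. */
static void debug_mutex_add_waiter(struct mutex_waiter *waiter,
                                   struct task_struct *task)
{
    task->blocked_on = waiter;
}

/* The sanity checks now compare task pointers directly. */
static void mutex_remove_waiter(struct mutex_waiter *waiter,
                                struct task_struct *task)
{
    assert(waiter->task == task);
    assert(task->blocked_on == waiter);
    task->blocked_on = NULL;
    waiter->task = NULL;
}

int main(void)
{
    struct task_struct t = { NULL };
    struct mutex_waiter w = { &t };

    debug_mutex_add_waiter(&w, &t);
    mutex_remove_waiter(&w, &t);
    return 0;
}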
+ va_end(args); + printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); + #ifdef CONFIG_DEBUG_BUGVERBOSE +- dump_stack(); ++ /* ++ * Avoid nested stack-dumping if a panic occurs during oops processing ++ */ ++ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1) ++ dump_stack(); + #endif + + /* +@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc + const char *board; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); +- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller); ++ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller); + board = dmi_get_system_info(DMI_PRODUCT_NAME); + if (board) + printk(KERN_WARNING "Hardware name: %s\n", board); +@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null); + */ + void __stack_chk_fail(void) + { +- panic("stack-protector: Kernel stack is corrupted in: %p\n", ++ dump_stack(); ++ panic("stack-protector: Kernel stack is corrupted in: %pA\n", + __builtin_return_address(0)); + } + EXPORT_SYMBOL(__stack_chk_fail); +diff --git a/kernel/params.c b/kernel/params.c +index d656c27..21e452c 100644 +--- a/kernel/params.c ++++ b/kernel/params.c +@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj, + return ret; + } + +-static struct sysfs_ops module_sysfs_ops = { ++static const struct sysfs_ops module_sysfs_ops = { + .show = module_attr_show, + .store = module_attr_store, + }; +@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj) + return 0; + } + +-static struct kset_uevent_ops module_uevent_ops = { ++static const struct kset_uevent_ops module_uevent_ops = { + .filter = uevent_filter, + }; + +diff --git a/kernel/perf_event.c b/kernel/perf_event.c +index 37ebc14..9c121d9 100644 +--- a/kernel/perf_event.c ++++ b/kernel/perf_event.c +@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */ + */ + int sysctl_perf_event_sample_rate __read_mostly = 100000; + +-static atomic64_t perf_event_id; ++static atomic64_unchecked_t perf_event_id; + + /* + * Lock for (sysadmin-configurable) event reservations: +@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event, + * In order to keep per-task stats reliable we need to flip the event + * values when we flip the contexts. 
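The perf_event.c conversions above (and the similar ones in kernel/profile.c, rcutorture and the rt-mutex tester further down) follow one PaX REFCOUNT pattern: the ordinary atomic_t/atomic64_t operations gain overflow detection to stop refcount-overflow exploits, so counters that are allowed to wrap, such as statistics and event ids, are moved to *_unchecked variants with plain wrapping semantics. A loose userspace illustration only; the types and helpers are invented, real atomicity and the kernel's trap-based handler are omitted, and __builtin_add_overflow is assumed available (GCC/clang).

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int v; } atomic_checked_t;   /* the default  */
typedef struct { volatile int v; } atomic_unchecked_t; /* the opt-out  */

static void atomic_add_checked(int n, atomic_checked_t *a)
{
    int r;

    if (__builtin_add_overflow(a->v, n, &r)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort(); /* stand-in for the kernel's overflow trap */
    }
    a->v = r;
}

static void atomic_add_unchecked(int n, atomic_unchecked_t *a)
{
    /* Wraps by design; the casts keep this model free of undefined
     * signed overflow. */
    a->v = (int)((unsigned)a->v + (unsigned)n);
}

int main(void)
{
    atomic_unchecked_t stat = { 0x7fffffff };
    atomic_checked_t ref = { 0x7fffffff };

    atomic_add_unchecked(1, &stat); /* fine: the statistic may wrap  */
    atomic_add_checked(1, &ref);    /* aborts: would overflow        */
    return 0;
}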
+ */ +- value = atomic64_read(&next_event->count); +- value = atomic64_xchg(&event->count, value); +- atomic64_set(&next_event->count, value); ++ value = atomic64_read_unchecked(&next_event->count); ++ value = atomic64_xchg_unchecked(&event->count, value); ++ atomic64_set_unchecked(&next_event->count, value); + + swap(event->total_time_enabled, next_event->total_time_enabled); + swap(event->total_time_running, next_event->total_time_running); +@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event) + update_event_times(event); + } + +- return atomic64_read(&event->count); ++ return atomic64_read_unchecked(&event->count); + } + + /* +@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event, + values[n++] = 1 + leader->nr_siblings; + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = leader->total_time_enabled + +- atomic64_read(&leader->child_total_time_enabled); ++ atomic64_read_unchecked(&leader->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = leader->total_time_running + +- atomic64_read(&leader->child_total_time_running); ++ atomic64_read_unchecked(&leader->child_total_time_running); + } + + size = n * sizeof(u64); +@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event, + values[n++] = perf_event_read_value(event); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = event->total_time_enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = event->total_time_running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); +@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) + static void perf_event_reset(struct perf_event *event) + { + (void)perf_event_read(event); +- atomic64_set(&event->count, 0); ++ atomic64_set_unchecked(&event->count, 0); + perf_event_update_userpage(event); + } + +@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event) + ++userpg->lock; + barrier(); + userpg->index = perf_event_index(event); +- userpg->offset = atomic64_read(&event->count); ++ userpg->offset = atomic64_read_unchecked(&event->count); + if (event->state == PERF_EVENT_STATE_ACTIVE) +- userpg->offset -= atomic64_read(&event->hw.prev_count); ++ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count); + + userpg->time_enabled = event->total_time_enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + + userpg->time_running = event->total_time_running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + + barrier(); + ++userpg->lock; +@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle, + u64 values[4]; + int n = 0; + +- values[n++] = atomic64_read(&event->count); ++ values[n++] = atomic64_read_unchecked(&event->count); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = event->total_time_enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = event->total_time_running + +- 
atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); +@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, + if (leader != event) + leader->pmu->read(leader); + +- values[n++] = atomic64_read(&leader->count); ++ values[n++] = atomic64_read_unchecked(&leader->count); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(leader); + +@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, + if (sub != event) + sub->pmu->read(sub); + +- values[n++] = atomic64_read(&sub->count); ++ values[n++] = atomic64_read_unchecked(&sub->count); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(sub); + +@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) + * need to add enough zero bytes after the string to handle + * the 64bit alignment we do later. + */ +- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); ++ buf = kzalloc(PATH_MAX, GFP_KERNEL); + if (!buf) { + name = strncpy(tmp, "//enomem", sizeof(tmp)); + goto got_name; + } +- name = d_path(&file->f_path, buf, PATH_MAX); ++ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64)); + if (IS_ERR(name)) { + name = strncpy(tmp, "//toolong", sizeof(tmp)); + goto got_name; +@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, + { + struct hw_perf_event *hwc = &event->hw; + +- atomic64_add(nr, &event->count); ++ atomic64_add_unchecked(nr, &event->count); + + if (!hwc->sample_period) + return; +@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event) + u64 now; + + now = cpu_clock(cpu); +- prev = atomic64_read(&event->hw.prev_count); +- atomic64_set(&event->hw.prev_count, now); +- atomic64_add(now - prev, &event->count); ++ prev = atomic64_read_unchecked(&event->hw.prev_count); ++ atomic64_set_unchecked(&event->hw.prev_count, now); ++ atomic64_add_unchecked(now - prev, &event->count); + } + + static int cpu_clock_perf_event_enable(struct perf_event *event) +@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event) + struct hw_perf_event *hwc = &event->hw; + int cpu = raw_smp_processor_id(); + +- atomic64_set(&hwc->prev_count, cpu_clock(cpu)); ++ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu)); + perf_swevent_start_hrtimer(event); + + return 0; +@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now) + u64 prev; + s64 delta; + +- prev = atomic64_xchg(&event->hw.prev_count, now); ++ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now); + delta = now - prev; +- atomic64_add(delta, &event->count); ++ atomic64_add_unchecked(delta, &event->count); + } + + static int task_clock_perf_event_enable(struct perf_event *event) +@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event) + + now = event->ctx->time; + +- atomic64_set(&hwc->prev_count, now); ++ atomic64_set_unchecked(&hwc->prev_count, now); + + perf_swevent_start_hrtimer(event); + +@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr, + event->parent = parent_event; + + event->ns = get_pid_ns(current->nsproxy->pid_ns); +- event->id = atomic64_inc_return(&perf_event_id); ++ event->id = atomic64_inc_return_unchecked(&perf_event_id); + + event->state = PERF_EVENT_STATE_INACTIVE; + +@@ -4720,15 +4720,15 @@ static void 
sync_child_event(struct perf_event *child_event, + if (child_event->attr.inherit_stat) + perf_event_read_event(child_event, child); + +- child_val = atomic64_read(&child_event->count); ++ child_val = atomic64_read_unchecked(&child_event->count); + + /* + * Add back the child's count to the parent's count: + */ +- atomic64_add(child_val, &parent_event->count); +- atomic64_add(child_event->total_time_enabled, ++ atomic64_add_unchecked(child_val, &parent_event->count); ++ atomic64_add_unchecked(child_event->total_time_enabled, + &parent_event->child_total_time_enabled); +- atomic64_add(child_event->total_time_running, ++ atomic64_add_unchecked(child_event->total_time_running, + &parent_event->child_total_time_running); + + /* +diff --git a/kernel/pid.c b/kernel/pid.c +index fce7198..4f23a7e 100644 +--- a/kernel/pid.c ++++ b/kernel/pid.c +@@ -33,6 +33,7 @@ + #include <linux/rculist.h> + #include <linux/bootmem.h> + #include <linux/hash.h> ++#include <linux/security.h> + #include <linux/pid_namespace.h> + #include <linux/init_task.h> + #include <linux/syscalls.h> +@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID; + + int pid_max = PID_MAX_DEFAULT; + +-#define RESERVED_PIDS 300 ++#define RESERVED_PIDS 500 + + int pid_max_min = RESERVED_PIDS + 1; + int pid_max_max = PID_MAX_LIMIT; +@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task); + */ + struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) + { +- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ struct task_struct *task; ++ ++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ ++ if (gr_pid_is_chrooted(task)) ++ return NULL; ++ ++ return task; + } + + struct task_struct *find_task_by_vpid(pid_t vnr) +@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr) + return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns); + } + ++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr) ++{ ++ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID); ++} ++ + struct pid *get_task_pid(struct task_struct *task, enum pid_type type) + { + struct pid *pid; +diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c +index 5c9dc22..d271117 100644 +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -6,6 +6,7 @@ + #include <linux/posix-timers.h> + #include <linux/errno.h> + #include <linux/math64.h> ++#include <linux/security.h> + #include <asm/uaccess.h> + #include <linux/kernel_stat.h> + #include <trace/events/timer.h> +@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block) + + static __init int init_posix_cpu_timers(void) + { +- struct k_clock process = { ++ static struct k_clock process = { + .clock_getres = process_cpu_clock_getres, + .clock_get = process_cpu_clock_get, + .clock_set = do_posix_clock_nosettime, +@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void) + .nsleep = process_cpu_nsleep, + .nsleep_restart = process_cpu_nsleep_restart, + }; +- struct k_clock thread = { ++ static struct k_clock thread = { + .clock_getres = thread_cpu_clock_getres, + .clock_get = thread_cpu_clock_get, + .clock_set = do_posix_clock_nosettime, +diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c +index 5e76d22..cf1baeb 100644 +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c +@@ -42,6 +42,7 @@ + #include <linux/compiler.h> + #include <linux/idr.h> + #include <linux/posix-timers.h> ++#include <linux/grsecurity.h> + #include <linux/syscalls.h> + #include <linux/wait.h> + #include <linux/workqueue.h> +@@ 
-131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock); + * which we beg off on and pass to do_sys_settimeofday(). + */ + +-static struct k_clock posix_clocks[MAX_CLOCKS]; ++static struct k_clock *posix_clocks[MAX_CLOCKS]; + + /* + * These ones are defined below. +@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags) + */ + #define CLOCK_DISPATCH(clock, call, arglist) \ + ((clock) < 0 ? posix_cpu_##call arglist : \ +- (posix_clocks[clock].call != NULL \ +- ? (*posix_clocks[clock].call) arglist : common_##call arglist)) ++ (posix_clocks[clock]->call != NULL \ ++ ? (*posix_clocks[clock]->call) arglist : common_##call arglist)) + + /* + * Default clock hook functions when the struct k_clock passed +@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock, + struct timespec *tp) + { + tp->tv_sec = 0; +- tp->tv_nsec = posix_clocks[which_clock].res; ++ tp->tv_nsec = posix_clocks[which_clock]->res; + return 0; + } + +@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock) + return 0; + if ((unsigned) which_clock >= MAX_CLOCKS) + return 1; +- if (posix_clocks[which_clock].clock_getres != NULL) ++ if (posix_clocks[which_clock] == NULL) + return 0; +- if (posix_clocks[which_clock].res != 0) ++ if (posix_clocks[which_clock]->clock_getres != NULL) ++ return 0; ++ if (posix_clocks[which_clock]->res != 0) + return 0; + return 1; + } +@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp) + */ + static __init int init_posix_timers(void) + { +- struct k_clock clock_realtime = { ++ static struct k_clock clock_realtime = { + .clock_getres = hrtimer_get_res, + }; +- struct k_clock clock_monotonic = { ++ static struct k_clock clock_monotonic = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_ktime_get_ts, + .clock_set = do_posix_clock_nosettime, + }; +- struct k_clock clock_monotonic_raw = { ++ static struct k_clock clock_monotonic_raw = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_get_monotonic_raw, + .clock_set = do_posix_clock_nosettime, + .timer_create = no_timer_create, + .nsleep = no_nsleep, + }; +- struct k_clock clock_realtime_coarse = { ++ static struct k_clock clock_realtime_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_realtime_coarse, + .clock_set = do_posix_clock_nosettime, + .timer_create = no_timer_create, + .nsleep = no_nsleep, + }; +- struct k_clock clock_monotonic_coarse = { ++ static struct k_clock clock_monotonic_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_monotonic_coarse, + .clock_set = do_posix_clock_nosettime, +@@ -296,6 +299,8 @@ static __init int init_posix_timers(void) + .nsleep = no_nsleep, + }; + ++ pax_track_stack(); ++ + register_posix_clock(CLOCK_REALTIME, &clock_realtime); + register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); + register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw); +@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock) + return; + } + +- posix_clocks[clock_id] = *new_clock; ++ posix_clocks[clock_id] = new_clock; + } + EXPORT_SYMBOL_GPL(register_posix_clock); + +@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, + if (copy_from_user(&new_tp, tp, sizeof (*tp))) + return -EFAULT; + ++ /* only the CLOCK_REALTIME clock can be set, all other clocks ++ have their clock_set fptr set to a nosettime dummy function ++ CLOCK_REALTIME has a NULL clock_set fptr which 
causes it to ++ call common_clock_set, which calls do_sys_settimeofday, which ++ we hook ++ */ ++ + return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp)); + } + +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c +index 04a9e90..bc355aa 100644 +--- a/kernel/power/hibernate.c ++++ b/kernel/power/hibernate.c +@@ -48,14 +48,14 @@ enum { + + static int hibernation_mode = HIBERNATION_SHUTDOWN; + +-static struct platform_hibernation_ops *hibernation_ops; ++static const struct platform_hibernation_ops *hibernation_ops; + + /** + * hibernation_set_ops - set the global hibernate operations + * @ops: the hibernation operations to use in subsequent hibernation transitions + */ + +-void hibernation_set_ops(struct platform_hibernation_ops *ops) ++void hibernation_set_ops(const struct platform_hibernation_ops *ops) + { + if (ops && !(ops->begin && ops->end && ops->pre_snapshot + && ops->prepare && ops->finish && ops->enter && ops->pre_restore +diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c +index e8b3370..484c2e4 100644 +--- a/kernel/power/poweroff.c ++++ b/kernel/power/poweroff.c +@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = { + .enable_mask = SYSRQ_ENABLE_BOOT, + }; + +-static int pm_sysrq_init(void) ++static int __init pm_sysrq_init(void) + { + register_sysrq_key('o', &sysrq_poweroff_op); + return 0; +diff --git a/kernel/power/process.c b/kernel/power/process.c +index e7cd671..56d5f459 100644 +--- a/kernel/power/process.c ++++ b/kernel/power/process.c +@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only) + struct timeval start, end; + u64 elapsed_csecs64; + unsigned int elapsed_csecs; ++ bool timedout = false; + + do_gettimeofday(&start); + + end_time = jiffies + TIMEOUT; + do { + todo = 0; ++ if (time_after(jiffies, end_time)) ++ timedout = true; + read_lock(&tasklist_lock); + do_each_thread(g, p) { + if (frozen(p) || !freezeable(p)) +@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only) + * It is "frozen enough". If the task does wake + * up, it will immediately call try_to_freeze. + */ +- if (!task_is_stopped_or_traced(p) && +- !freezer_should_skip(p)) ++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) { + todo++; ++ if (timedout) { ++ printk(KERN_ERR "Task refusing to freeze:\n"); ++ sched_show_task(p); ++ } ++ } + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + yield(); /* Yield is okay here */ +- if (time_after(jiffies, end_time)) +- break; +- } while (todo); ++ } while (todo && !timedout); + + do_gettimeofday(&end); + elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c +index 40dd021..fb30ceb 100644 +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c +@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = { + [PM_SUSPEND_MEM] = "mem", + }; + +-static struct platform_suspend_ops *suspend_ops; ++static const struct platform_suspend_ops *suspend_ops; + + /** + * suspend_set_ops - Set the global suspend method table. + * @ops: Pointer to ops structure. 
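The kernel/power/process.c change above is a control-flow fix worth spelling out: instead of breaking out of the freeze loop as soon as the deadline passes (and never saying which tasks were stuck), it latches a timedout flag and lets one more pass run, so every still-unfrozen task gets reported before the function gives up. A compact userspace model of that loop shape; frozen_yet() and the pass counter stand in for the tasklist walk and the jiffies deadline.

#include <stdbool.h>
#include <stdio.h>

static bool frozen_yet(int task, int pass) { return pass > task; }

int main(void)
{
    const int deadline_passes = 3; /* stands in for jiffies + TIMEOUT */
    bool timedout = false;
    int todo, pass = 0;

    do {
        todo = 0;
        if (pass++ >= deadline_passes)
            timedout = true;
        for (int task = 0; task < 5; task++) {
            if (!frozen_yet(task, pass)) {
                todo++;
                if (timedout) /* final pass: name the stragglers */
                    printf("task %d refusing to freeze\n", task);
            }
        }
    } while (todo && !timedout);

    return 0;
}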
+ */ +-void suspend_set_ops(struct platform_suspend_ops *ops) ++void suspend_set_ops(const struct platform_suspend_ops *ops) + { + mutex_lock(&pm_mutex); + suspend_ops = ops; +diff --git a/kernel/printk.c b/kernel/printk.c +index 4cade47..4d17900 100644 +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -33,6 +33,7 @@ + #include <linux/bootmem.h> + #include <linux/syscalls.h> + #include <linux/kexec.h> ++#include <linux/syslog.h> + + #include <asm/uaccess.h> + +@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void) + } + #endif + +-/* +- * Commands to do_syslog: +- * +- * 0 -- Close the log. Currently a NOP. +- * 1 -- Open the log. Currently a NOP. +- * 2 -- Read from the log. +- * 3 -- Read all messages remaining in the ring buffer. +- * 4 -- Read and clear all messages remaining in the ring buffer +- * 5 -- Clear ring buffer. +- * 6 -- Disable printk's to console +- * 7 -- Enable printk's to console +- * 8 -- Set level of messages printed to console +- * 9 -- Return number of unread characters in the log buffer +- * 10 -- Return size of the log buffer +- */ +-int do_syslog(int type, char __user *buf, int len) ++int do_syslog(int type, char __user *buf, int len, bool from_file) + { + unsigned i, j, limit, count; + int do_clear = 0; + char c; + int error = 0; + +- error = security_syslog(type); ++#ifdef CONFIG_GRKERNSEC_DMESG ++ if (grsec_enable_dmesg && ++ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) && ++ !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++#endif ++ ++ error = security_syslog(type, from_file); + if (error) + return error; + + switch (type) { +- case 0: /* Close log */ ++ case SYSLOG_ACTION_CLOSE: /* Close log */ + break; +- case 1: /* Open log */ ++ case SYSLOG_ACTION_OPEN: /* Open log */ + break; +- case 2: /* Read from log */ ++ case SYSLOG_ACTION_READ: /* Read from log */ + error = -EINVAL; + if (!buf || len < 0) + goto out; +@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len) + if (!error) + error = i; + break; +- case 4: /* Read/clear last kernel messages */ ++ /* Read/clear last kernel messages */ ++ case SYSLOG_ACTION_READ_CLEAR: + do_clear = 1; + /* FALL THRU */ +- case 3: /* Read last kernel messages */ ++ /* Read last kernel messages */ ++ case SYSLOG_ACTION_READ_ALL: + error = -EINVAL; + if (!buf || len < 0) + goto out; +@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len) + } + } + break; +- case 5: /* Clear ring buffer */ ++ /* Clear ring buffer */ ++ case SYSLOG_ACTION_CLEAR: + logged_chars = 0; + break; +- case 6: /* Disable logging to console */ ++ /* Disable logging to console */ ++ case SYSLOG_ACTION_CONSOLE_OFF: + if (saved_console_loglevel == -1) + saved_console_loglevel = console_loglevel; + console_loglevel = minimum_console_loglevel; + break; +- case 7: /* Enable logging to console */ ++ /* Enable logging to console */ ++ case SYSLOG_ACTION_CONSOLE_ON: + if (saved_console_loglevel != -1) { + console_loglevel = saved_console_loglevel; + saved_console_loglevel = -1; + } + break; +- case 8: /* Set level of messages printed to console */ ++ /* Set level of messages printed to console */ ++ case SYSLOG_ACTION_CONSOLE_LEVEL: + error = -EINVAL; + if (len < 1 || len > 8) + goto out; +@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len) + saved_console_loglevel = -1; + error = 0; + break; +- case 9: /* Number of chars in the log buffer */ ++ /* Number of chars in the log buffer */ ++ case SYSLOG_ACTION_SIZE_UNREAD: + error = log_end - log_start; + break; +- case 10: /* Size of the log 
buffer */ ++ /* Size of the log buffer */ ++ case SYSLOG_ACTION_SIZE_BUFFER: + error = log_buf_len; + break; + default: +@@ -415,7 +416,7 @@ out: + + SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) + { +- return do_syslog(type, buf, len); ++ return do_syslog(type, buf, len, SYSLOG_FROM_CALL); + } + + /* +diff --git a/kernel/profile.c b/kernel/profile.c +index dfadc5b..7f59404 100644 +--- a/kernel/profile.c ++++ b/kernel/profile.c +@@ -39,7 +39,7 @@ struct profile_hit { + /* Oprofile timer tick hook */ + static int (*timer_hook)(struct pt_regs *) __read_mostly; + +-static atomic_t *prof_buffer; ++static atomic_unchecked_t *prof_buffer; + static unsigned long prof_len, prof_shift; + + int prof_on __read_mostly; +@@ -283,7 +283,7 @@ static void profile_flip_buffers(void) + hits[i].pc = 0; + continue; + } +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); + hits[i].hits = hits[i].pc = 0; + } + } +@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits) + * Add the current hit(s) and flush the write-queue out + * to the global buffer: + */ +- atomic_add(nr_hits, &prof_buffer[pc]); ++ atomic_add_unchecked(nr_hits, &prof_buffer[pc]); + for (i = 0; i < NR_PROFILE_HIT; ++i) { +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); + hits[i].pc = hits[i].hits = 0; + } + out: +@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits) + if (prof_on != type || !prof_buffer) + return; + pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; +- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); ++ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); + } + #endif /* !CONFIG_SMP */ + EXPORT_SYMBOL_GPL(profile_hits); +@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) + return -EFAULT; + buf++; p++; count--; read++; + } +- pnt = (char *)prof_buffer + p - sizeof(atomic_t); ++ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t); + if (copy_to_user(buf, (void *)pnt, count)) + return -EFAULT; + read += count; +@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, + } + #endif + profile_discard_flip_buffers(); +- memset(prof_buffer, 0, prof_len * sizeof(atomic_t)); ++ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t)); + return count; + } + +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 05625f6..733bf70 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill) + return ret; + } + +-int __ptrace_may_access(struct task_struct *task, unsigned int mode) ++static int __ptrace_may_access(struct task_struct *task, unsigned int mode, ++ unsigned int log) + { + const struct cred *cred = current_cred(), *tcred; + +@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) + cred->gid != tcred->egid || + cred->gid != tcred->sgid || + cred->gid != tcred->gid) && +- !capable(CAP_SYS_PTRACE)) { ++ ((!log && !capable_nolog(CAP_SYS_PTRACE)) || ++ (log && !capable(CAP_SYS_PTRACE))) ++ ) { + rcu_read_unlock(); + return -EPERM; + } +@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) + smp_rmb(); + if (task->mm) + dumpable = get_dumpable(task->mm); +- if (!dumpable && !capable(CAP_SYS_PTRACE)) ++ if (!dumpable && ++ ((!log && 
!capable_nolog(CAP_SYS_PTRACE)) || ++ (log && !capable(CAP_SYS_PTRACE)))) + return -EPERM; + + return security_ptrace_access_check(task, mode); +@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) + { + int err; + task_lock(task); +- err = __ptrace_may_access(task, mode); ++ err = __ptrace_may_access(task, mode, 0); ++ task_unlock(task); ++ return !err; ++} ++ ++bool ptrace_may_access_log(struct task_struct *task, unsigned int mode) ++{ ++ int err; ++ task_lock(task); ++ err = __ptrace_may_access(task, mode, 1); + task_unlock(task); + return !err; + } +@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task) + goto out; + + task_lock(task); +- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); ++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1); + task_unlock(task); + if (retval) + goto unlock_creds; +@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task) + goto unlock_tasklist; + + task->ptrace = PT_PTRACED; +- if (capable(CAP_SYS_PTRACE)) ++ if (capable_nolog(CAP_SYS_PTRACE)) + task->ptrace |= PT_PTRACE_CAP; + + __ptrace_link(task, current); +@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst + { + int copied = 0; + ++ pax_track_stack(); ++ + while (len > 0) { + char buf[128]; + int this_len, retval; +@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds + { + int copied = 0; + ++ pax_track_stack(); ++ + while (len > 0) { + char buf[128]; + int this_len, retval; +@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request, + int ret = -EIO; + siginfo_t siginfo; + ++ pax_track_stack(); ++ + switch (request) { + case PTRACE_PEEKTEXT: + case PTRACE_PEEKDATA: +@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request, + ret = ptrace_setoptions(child, data); + break; + case PTRACE_GETEVENTMSG: +- ret = put_user(child->ptrace_message, (unsigned long __user *) data); ++ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data); + break; + + case PTRACE_GETSIGINFO: + ret = ptrace_getsiginfo(child, &siginfo); + if (!ret) +- ret = copy_siginfo_to_user((siginfo_t __user *) data, ++ ret = copy_siginfo_to_user((__force siginfo_t __user *) data, + &siginfo); + break; + + case PTRACE_SETSIGINFO: +- if (copy_from_user(&siginfo, (siginfo_t __user *) data, ++ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data, + sizeof siginfo)) + ret = -EFAULT; + else +@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) + goto out; + } + ++ if (gr_handle_ptrace(child, request)) { ++ ret = -EPERM; ++ goto out_put_task_struct; ++ } ++ + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + /* + * Some architectures need to do book-keeping after + * a ptrace attach. 
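The __ptrace_may_access() rework above threads a log flag through one shared permission check, so the query-only wrapper (ptrace_may_access()) stays silent on denial while the attach path (ptrace_may_access_log()) produces an audit record via the logging capable(). A simplified userspace mock of that split; capable() and capable_nolog() are stubs here, whereas the kernel logs through the grsecurity capability hooks.

#include <stdbool.h>
#include <stdio.h>

#define CAP_SYS_PTRACE 19

/* Stub: check the capability without emitting any audit record. */
static bool capable_nolog(int cap) { (void)cap; return false; }

/* Stub: the same check, but a denial is logged. */
static bool capable(int cap)
{
    bool ok = capable_nolog(cap);

    if (!ok)
        fprintf(stderr, "audit: CAP %d denied\n", cap);
    return ok;
}

/* Mirrors the patched condition: the silent check is used when
 * log == 0, the logging one when log == 1. */
static int may_access(bool needs_cap, int log)
{
    if (needs_cap &&
        ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
         (log && !capable(CAP_SYS_PTRACE))))
        return -1; /* -EPERM in the kernel */
    return 0;
}

int main(void)
{
    may_access(true, 0); /* silent refusal (ptrace_may_access)      */
    may_access(true, 1); /* audited refusal (ptrace_may_access_log) */
    return 0;
}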
+ */ +- if (!ret) ++ if (!ret) { + arch_ptrace_attach(child); ++ gr_audit_ptrace(child); ++ } + goto out_put_task_struct; + } + +@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data) + copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); + if (copied != sizeof(tmp)) + return -EIO; +- return put_user(tmp, (unsigned long __user *)data); ++ return put_user(tmp, (__force unsigned long __user *)data); + } + + int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data) +@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, + siginfo_t siginfo; + int ret; + ++ pax_track_stack(); ++ + switch (request) { + case PTRACE_PEEKTEXT: + case PTRACE_PEEKDATA: +@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + goto out; + } + ++ if (gr_handle_ptrace(child, request)) { ++ ret = -EPERM; ++ goto out_put_task_struct; ++ } ++ + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + /* + * Some architectures need to do book-keeping after + * a ptrace attach. + */ +- if (!ret) ++ if (!ret) { + arch_ptrace_attach(child); ++ gr_audit_ptrace(child); ++ } + goto out_put_task_struct; + } + +diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c +index 697c0a0..2402696 100644 +--- a/kernel/rcutorture.c ++++ b/kernel/rcutorture.c +@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = + { 0 }; + static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = + { 0 }; +-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; +-static atomic_t n_rcu_torture_alloc; +-static atomic_t n_rcu_torture_alloc_fail; +-static atomic_t n_rcu_torture_free; +-static atomic_t n_rcu_torture_mberror; +-static atomic_t n_rcu_torture_error; ++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; ++static atomic_unchecked_t n_rcu_torture_alloc; ++static atomic_unchecked_t n_rcu_torture_alloc_fail; ++static atomic_unchecked_t n_rcu_torture_free; ++static atomic_unchecked_t n_rcu_torture_mberror; ++static atomic_unchecked_t n_rcu_torture_error; + static long n_rcu_torture_timers; + static struct list_head rcu_torture_removed; + static cpumask_var_t shuffle_tmp_mask; +@@ -187,11 +187,11 @@ rcu_torture_alloc(void) + + spin_lock_bh(&rcu_torture_lock); + if (list_empty(&rcu_torture_freelist)) { +- atomic_inc(&n_rcu_torture_alloc_fail); ++ atomic_inc_unchecked(&n_rcu_torture_alloc_fail); + spin_unlock_bh(&rcu_torture_lock); + return NULL; + } +- atomic_inc(&n_rcu_torture_alloc); ++ atomic_inc_unchecked(&n_rcu_torture_alloc); + p = rcu_torture_freelist.next; + list_del_init(p); + spin_unlock_bh(&rcu_torture_lock); +@@ -204,7 +204,7 @@ rcu_torture_alloc(void) + static void + rcu_torture_free(struct rcu_torture *p) + { +- atomic_inc(&n_rcu_torture_free); ++ atomic_inc_unchecked(&n_rcu_torture_free); + spin_lock_bh(&rcu_torture_lock); + list_add_tail(&p->rtort_free, &rcu_torture_freelist); + spin_unlock_bh(&rcu_torture_lock); +@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p) + i = rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { + rp->rtort_mbtest = 0; + rcu_torture_free(rp); +@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p) + i = rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = 
RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { + rp->rtort_mbtest = 0; + list_del(&rp->rtort_free); +@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg) + i = old_rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + old_rp->rtort_pipe_count++; + cur_ops->deferred_free(old_rp); + } +@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused) + return; + } + if (p->rtort_mbtest == 0) +- atomic_inc(&n_rcu_torture_mberror); ++ atomic_inc_unchecked(&n_rcu_torture_mberror); + spin_lock(&rand_lock); + cur_ops->read_delay(&rand); + n_rcu_torture_timers++; +@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg) + continue; + } + if (p->rtort_mbtest == 0) +- atomic_inc(&n_rcu_torture_mberror); ++ atomic_inc_unchecked(&n_rcu_torture_mberror); + cur_ops->read_delay(&rand); + preempt_disable(); + pipe_count = p->rtort_pipe_count; +@@ -834,17 +834,17 @@ rcu_torture_printk(char *page) + rcu_torture_current, + rcu_torture_current_version, + list_empty(&rcu_torture_freelist), +- atomic_read(&n_rcu_torture_alloc), +- atomic_read(&n_rcu_torture_alloc_fail), +- atomic_read(&n_rcu_torture_free), +- atomic_read(&n_rcu_torture_mberror), ++ atomic_read_unchecked(&n_rcu_torture_alloc), ++ atomic_read_unchecked(&n_rcu_torture_alloc_fail), ++ atomic_read_unchecked(&n_rcu_torture_free), ++ atomic_read_unchecked(&n_rcu_torture_mberror), + n_rcu_torture_timers); +- if (atomic_read(&n_rcu_torture_mberror) != 0) ++ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0) + cnt += sprintf(&page[cnt], " !!!"); + cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); + if (i > 1) { + cnt += sprintf(&page[cnt], "!!! 
"); +- atomic_inc(&n_rcu_torture_error); ++ atomic_inc_unchecked(&n_rcu_torture_error); + WARN_ON_ONCE(1); + } + cnt += sprintf(&page[cnt], "Reader Pipe: "); +@@ -858,7 +858,7 @@ rcu_torture_printk(char *page) + cnt += sprintf(&page[cnt], "Free-Block Circulation: "); + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { + cnt += sprintf(&page[cnt], " %d", +- atomic_read(&rcu_torture_wcount[i])); ++ atomic_read_unchecked(&rcu_torture_wcount[i])); + } + cnt += sprintf(&page[cnt], "\n"); + if (cur_ops->stats) +@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void) + + if (cur_ops->cleanup) + cur_ops->cleanup(); +- if (atomic_read(&n_rcu_torture_error)) ++ if (atomic_read_unchecked(&n_rcu_torture_error)) + rcu_torture_print_module_parms("End of test: FAILURE"); + else + rcu_torture_print_module_parms("End of test: SUCCESS"); +@@ -1138,13 +1138,13 @@ rcu_torture_init(void) + + rcu_torture_current = NULL; + rcu_torture_current_version = 0; +- atomic_set(&n_rcu_torture_alloc, 0); +- atomic_set(&n_rcu_torture_alloc_fail, 0); +- atomic_set(&n_rcu_torture_free, 0); +- atomic_set(&n_rcu_torture_mberror, 0); +- atomic_set(&n_rcu_torture_error, 0); ++ atomic_set_unchecked(&n_rcu_torture_alloc, 0); ++ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0); ++ atomic_set_unchecked(&n_rcu_torture_free, 0); ++ atomic_set_unchecked(&n_rcu_torture_mberror, 0); ++ atomic_set_unchecked(&n_rcu_torture_error, 0); + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) +- atomic_set(&rcu_torture_wcount[i], 0); ++ atomic_set_unchecked(&rcu_torture_wcount[i], 0); + for_each_possible_cpu(cpu) { + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { + per_cpu(rcu_torture_count, cpu)[i] = 0; +diff --git a/kernel/rcutree.c b/kernel/rcutree.c +index 683c4f3..97f54c6 100644 +--- a/kernel/rcutree.c ++++ b/kernel/rcutree.c +@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) + /* + * Do softirq processing for the current CPU. 
+ */ +-static void rcu_process_callbacks(struct softirq_action *unused) ++static void rcu_process_callbacks(void) + { + /* + * Memory references from any prior RCU read-side critical sections +diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h +index c03edf7..ac1b341 100644 +--- a/kernel/rcutree_plugin.h ++++ b/kernel/rcutree_plugin.h +@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu) + */ + void __rcu_read_lock(void) + { +- ACCESS_ONCE(current->rcu_read_lock_nesting)++; ++ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++; + barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ + } + EXPORT_SYMBOL_GPL(__rcu_read_lock); +@@ -251,7 +251,7 @@ void __rcu_read_unlock(void) + struct task_struct *t = current; + + barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ +- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 && ++ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 && + unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) + rcu_read_unlock_special(t); + } +diff --git a/kernel/relay.c b/kernel/relay.c +index bf343f5..908e9ee 100644 +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in, + unsigned int flags, + int *nonpad_ret) + { +- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret; ++ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages; + struct rchan_buf *rbuf = in->private_data; + unsigned int subbuf_size = rbuf->chan->subbuf_size; + uint64_t pos = (uint64_t) *ppos; +@@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in, + .ops = &relay_pipe_buf_ops, + .spd_release = relay_page_release, + }; ++ ssize_t ret; ++ ++ pax_track_stack(); + + if (rbuf->subbufs_produced == rbuf->subbufs_consumed) + return 0; +diff --git a/kernel/resource.c b/kernel/resource.c +index fb11a58..4e61ae1 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = { + + static int __init ioresources_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations); ++#endif ++#else + proc_create("ioports", 0, NULL, &proc_ioports_operations); + proc_create("iomem", 0, NULL, &proc_iomem_operations); ++#endif + return 0; + } + __initcall(ioresources_init); +diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c +index a56f629..1fc4989 100644 +--- a/kernel/rtmutex-tester.c ++++ b/kernel/rtmutex-tester.c +@@ -21,7 +21,7 @@ + #define MAX_RT_TEST_MUTEXES 8 + + static spinlock_t rttest_lock; +-static atomic_t rttest_event; ++static atomic_unchecked_t rttest_event; + + struct test_thread_data { + int opcode; +@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + + case RTTEST_LOCKCONT: + td->mutexes[td->opdata] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + return 0; + + case RTTEST_RESET: +@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return 0; + + case RTTEST_RESETEVENT: +- atomic_set(&rttest_event, 0); ++ atomic_set_unchecked(&rttest_event, 0); + return 0; + + default: +@@ -99,9 +99,9 @@ 
static int handle_op(struct test_thread_data *td, int lockwakeup) + return ret; + + td->mutexes[id] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + rt_mutex_lock(&mutexes[id]); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = 4; + return 0; + +@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return ret; + + td->mutexes[id] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + ret = rt_mutex_lock_interruptible(&mutexes[id], 0); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = ret ? 0 : 4; + return ret ? -EINTR : 0; + +@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4) + return ret; + +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + rt_mutex_unlock(&mutexes[id]); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = 0; + return 0; + +@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + break; + + td->mutexes[dat] = 2; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + break; + + case RTTEST_LOCKBKL: +@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + return; + + td->mutexes[dat] = 3; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + break; + + case RTTEST_LOCKNOWAIT: +@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + return; + + td->mutexes[dat] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + return; + + case RTTEST_LOCKBKL: +diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c +index 29bd4ba..8c5de90 100644 +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c +@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) + */ + spin_lock_irqsave(&pendowner->pi_lock, flags); + +- WARN_ON(!pendowner->pi_blocked_on); ++ BUG_ON(!pendowner->pi_blocked_on); + WARN_ON(pendowner->pi_blocked_on != waiter); + WARN_ON(pendowner->pi_blocked_on->lock != lock); + +diff --git a/kernel/sched.c b/kernel/sched.c +index 0591df8..db35e3d 100644 +--- a/kernel/sched.c ++++ b/kernel/sched.c +@@ -5043,7 +5043,7 @@ out: + * In CONFIG_NO_HZ case, the idle load balance owner will do the + * rebalancing for all the cpus for whom scheduler ticks are stopped. + */ +-static void run_rebalance_domains(struct softirq_action *h) ++static void run_rebalance_domains(void) + { + int this_cpu = smp_processor_id(); + struct rq *this_rq = cpu_rq(this_cpu); +@@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void) + struct rq *rq; + int cpu; + ++ pax_track_stack(); ++ + need_resched: + preempt_disable(); + cpu = smp_processor_id(); +@@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule); + * Look out! "owner" is an entirely speculative pointer + * access and not reliable. 
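The atomic_*_unchecked() conversions above opt purely statistical counters (torture-test event counts here) out of PaX's REFCOUNT overflow trapping, under which plain atomic_t operations trap on overflow. A minimal sketch of the type, assuming the generic PaX definition; the per-architecture versions add the usual locked asm:

typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;	/* no REFCOUNT overflow trap: wrapping is harmless here */
}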
+ */ +-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) ++int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) + { + unsigned int cpu; + struct rq *rq; +@@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) + * DEBUG_PAGEALLOC could have unmapped it if + * the mutex owner just released it and exited. + */ +- if (probe_kernel_address(&owner->cpu, cpu)) ++ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu)) + return 0; + #else +- cpu = owner->cpu; ++ cpu = task_thread_info(owner)->cpu; + #endif + + /* +@@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) + /* + * Is that owner really running on that cpu? + */ +- if (task_thread_info(rq->curr) != owner || need_resched()) ++ if (rq->curr != owner || need_resched()) + return 0; + + cpu_relax(); +@@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p, const int nice) + /* convert nice value [19,-20] to rlimit style value [1,40] */ + int nice_rlim = 20 - nice; + ++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1); ++ + return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || + capable(CAP_SYS_NICE)); + } +@@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment) + if (nice > 19) + nice = 19; + +- if (increment < 0 && !can_nice(current, nice)) ++ if (increment < 0 && (!can_nice(current, nice) || ++ gr_handle_chroot_nice())) + return -EPERM; + + retval = security_task_setnice(current, nice); +@@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) + long power; + int weight; + +- WARN_ON(!sd || !sd->groups); ++ BUG_ON(!sd || !sd->groups); + + if (cpu != group_first_cpu(sd->groups)) + return; +diff --git a/kernel/signal.c b/kernel/signal.c +index 2494827..cda80a0 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -41,12 +41,12 @@ + + static struct kmem_cache *sigqueue_cachep; + +-static void __user *sig_handler(struct task_struct *t, int sig) ++static __sighandler_t sig_handler(struct task_struct *t, int sig) + { + return t->sighand->action[sig - 1].sa.sa_handler; + } + +-static int sig_handler_ignored(void __user *handler, int sig) ++static int sig_handler_ignored(__sighandler_t handler, int sig) + { + /* Is it explicitly or implicitly ignored? 
*/ + return handler == SIG_IGN || +@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig) + static int sig_task_ignored(struct task_struct *t, int sig, + int from_ancestor_ns) + { +- void __user *handler; ++ __sighandler_t handler; + + handler = sig_handler(t, sig); + +@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, + */ + user = get_uid(__task_cred(t)->user); + atomic_inc(&user->sigpending); ++ ++ if (!override_rlimit) ++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1); + if (override_rlimit || + atomic_read(&user->sigpending) <= + t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) +@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default) + + int unhandled_signal(struct task_struct *tsk, int sig) + { +- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; ++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler; + if (is_global_init(tsk)) + return 1; + if (handler != SIG_IGN && handler != SIG_DFL) +@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info, + } + } + ++ /* allow glibc communication via tgkill to other threads in our ++ thread group */ ++ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL || ++ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid) ++ && gr_handle_signal(t, sig)) ++ return -EPERM; ++ + return security_task_kill(t, info, sig, 0); + } + +@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) + return send_signal(sig, info, p, 1); + } + +-static int ++int + specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + return send_signal(sig, info, t, 0); +@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + unsigned long int flags; + int ret, blocked, ignored; + struct k_sigaction *action; ++ int is_unhandled = 0; + + spin_lock_irqsave(&t->sighand->siglock, flags); + action = &t->sighand->action[sig-1]; +@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + } + if (action->sa.sa_handler == SIG_DFL) + t->signal->flags &= ~SIGNAL_UNKILLABLE; ++ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL) ++ is_unhandled = 1; + ret = specific_send_sig_info(sig, info, t); + spin_unlock_irqrestore(&t->sighand->siglock, flags); + ++ /* only deal with unhandled signals, java etc trigger SIGSEGV during ++ normal operation */ ++ if (is_unhandled) { ++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t); ++ gr_handle_crash(t, sig); ++ } ++ + return ret; + } + +@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) + { + int ret = check_kill_permission(sig, info, p); + +- if (!ret && sig) ++ if (!ret && sig) { + ret = do_send_sig_info(sig, info, p, true); ++ if (!ret) ++ gr_log_signal(sig, !is_si_special(info) ? 
info->si_addr : NULL, p); ++ } + + return ret; + } +@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code) + { + siginfo_t info; + ++ pax_track_stack(); ++ + BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); + + memset(&info, 0, sizeof info); +@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) + int error = -ESRCH; + + rcu_read_lock(); +- p = find_task_by_vpid(pid); ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ /* allow glibc communication via tgkill to other threads in our ++ thread group */ ++ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL && ++ sig == (SIGRTMIN+1) && tgid == info->si_pid) ++ p = find_task_by_vpid_unrestricted(pid); ++ else ++#endif ++ p = find_task_by_vpid(pid); + if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { + error = check_kill_permission(sig, info, p); + /* +diff --git a/kernel/smp.c b/kernel/smp.c +index aa9cff3..631a0de 100644 +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait) + } + EXPORT_SYMBOL(smp_call_function); + +-void ipi_call_lock(void) ++void ipi_call_lock(void) __acquires(call_function.lock) + { + spin_lock(&call_function.lock); + } + +-void ipi_call_unlock(void) ++void ipi_call_unlock(void) __releases(call_function.lock) + { + spin_unlock(&call_function.lock); + } + +-void ipi_call_lock_irq(void) ++void ipi_call_lock_irq(void) __acquires(call_function.lock) + { + spin_lock_irq(&call_function.lock); + } + +-void ipi_call_unlock_irq(void) ++void ipi_call_unlock_irq(void) __releases(call_function.lock) + { + spin_unlock_irq(&call_function.lock); + } +diff --git a/kernel/softirq.c b/kernel/softirq.c +index 04a0252..580c512 100644 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp + + static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); + +-char *softirq_to_name[NR_SOFTIRQS] = { ++const char * const softirq_to_name[NR_SOFTIRQS] = { + "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; +@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip); + + asmlinkage void __do_softirq(void) + { +- struct softirq_action *h; ++ const struct softirq_action *h; + __u32 pending; + int max_restart = MAX_SOFTIRQ_RESTART; + int cpu; +@@ -233,7 +233,7 @@ restart: + kstat_incr_softirqs_this_cpu(h - softirq_vec); + + trace_softirq_entry(h, softirq_vec); +- h->action(h); ++ h->action(); + trace_softirq_exit(h, softirq_vec); + if (unlikely(prev_count != preempt_count())) { + printk(KERN_ERR "huh, entered softirq %td %s %p" +@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr) + local_irq_restore(flags); + } + +-void open_softirq(int nr, void (*action)(struct softirq_action *)) ++void open_softirq(int nr, void (*action)(void)) + { +- softirq_vec[nr].action = action; ++ pax_open_kernel(); ++ *(void **)&softirq_vec[nr].action = action; ++ pax_close_kernel(); + } + + /* +@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) + + EXPORT_SYMBOL(__tasklet_hi_schedule_first); + +-static void tasklet_action(struct softirq_action *a) ++static void tasklet_action(void) + { + struct tasklet_struct *list; + +@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a) + } + } + +-static void tasklet_hi_action(struct softirq_action *a) ++static void tasklet_hi_action(void) + { + struct tasklet_struct *list; + +diff --git a/kernel/sys.c b/kernel/sys.c +index 
e9512b1..f07185f 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error) + error = -EACCES; + goto out; + } ++ ++ if (gr_handle_chroot_setpriority(p, niceval)) { ++ error = -EACCES; ++ goto out; ++ } ++ + no_nice = security_task_setnice(p, niceval); + if (no_nice) { + error = no_nice; +@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) + !(user = find_user(who))) + goto out_unlock; /* No processes for this user */ + +- do_each_thread(g, p) ++ do_each_thread(g, p) { + if (__task_cred(p)->uid == who) + error = set_one_prio(p, niceval, error); +- while_each_thread(g, p); ++ } while_each_thread(g, p); + if (who != cred->uid) + free_uid(user); /* For find_user() */ + break; +@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) + !(user = find_user(who))) + goto out_unlock; /* No processes for this user */ + +- do_each_thread(g, p) ++ do_each_thread(g, p) { + if (__task_cred(p)->uid == who) { + niceval = 20 - task_nice(p); + if (niceval > retval) + retval = niceval; + } +- while_each_thread(g, p); ++ } while_each_thread(g, p); + if (who != cred->uid) + free_uid(user); /* for find_user() */ + break; +@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) + goto error; + } + ++ if (gr_check_group_change(new->gid, new->egid, -1)) ++ goto error; ++ + if (rgid != (gid_t) -1 || + (egid != (gid_t) -1 && egid != old->gid)) + new->sgid = new->egid; +@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) + goto error; + + retval = -EPERM; ++ ++ if (gr_check_group_change(gid, gid, gid)) ++ goto error; ++ + if (capable(CAP_SETGID)) + new->gid = new->egid = new->sgid = new->fsgid = gid; + else if (gid == old->gid || gid == old->sgid) +@@ -559,7 +572,7 @@ error: + /* + * change the user struct in a credentials set to match the new UID + */ +-static int set_user(struct cred *new) ++int set_user(struct cred *new) + { + struct user_struct *new_user; + +@@ -567,12 +580,19 @@ static int set_user(struct cred *new) + if (!new_user) + return -EAGAIN; + ++ /* ++ * We don't fail in case of NPROC limit excess here because too many ++ * poorly written programs don't check set*uid() return code, assuming ++ * it never fails if called by root. We may still enforce NPROC limit ++ * for programs doing set*uid()+execve() by harmlessly deferring the ++ * failure to the execve() stage. 
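The deferral described in this comment sets PF_NPROC_EXCEEDED (just below) instead of failing the set*uid() call, and the flag is then consumed on the execve() path. A sketch of the consuming side, under the assumption that it matches the companion change to fs/exec.c; the helper name is illustrative, not from the patch:

static int check_deferred_nproc(void)	/* illustrative helper, not in the patch */
{
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) >
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur)
		return -EAGAIN;	/* fail the execve() instead of the setuid() */

	current->flags &= ~PF_NPROC_EXCEEDED;
	return 0;
}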
++ */ + if (atomic_read(&new_user->processes) >= + current->signal->rlim[RLIMIT_NPROC].rlim_cur && +- new_user != INIT_USER) { +- free_uid(new_user); +- return -EAGAIN; +- } ++ new_user != INIT_USER) ++ current->flags |= PF_NPROC_EXCEEDED; ++ else ++ current->flags &= ~PF_NPROC_EXCEEDED; + + free_uid(new->user); + new->user = new_user; +@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) + goto error; + } + ++ if (gr_check_user_change(new->uid, new->euid, -1)) ++ goto error; ++ + if (new->uid != old->uid) { + retval = set_user(new); + if (retval < 0) +@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) + goto error; + + retval = -EPERM; ++ ++ if (gr_check_crash_uid(uid)) ++ goto error; ++ if (gr_check_user_change(uid, uid, uid)) ++ goto error; ++ + if (capable(CAP_SETUID)) { + new->suid = new->uid = uid; + if (uid != old->uid) { +@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) + goto error; + } + ++ if (gr_check_user_change(ruid, euid, -1)) ++ goto error; ++ + if (ruid != (uid_t) -1) { + new->uid = ruid; + if (ruid != old->uid) { +@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) + goto error; + } + ++ if (gr_check_group_change(rgid, egid, -1)) ++ goto error; ++ + if (rgid != (gid_t) -1) + new->gid = rgid; + if (egid != (gid_t) -1) +@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) + if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0) + goto error; + ++ if (gr_check_user_change(-1, -1, uid)) ++ goto error; ++ + if (uid == old->uid || uid == old->euid || + uid == old->suid || uid == old->fsuid || + capable(CAP_SETUID)) { +@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) + if (gid == old->gid || gid == old->egid || + gid == old->sgid || gid == old->fsgid || + capable(CAP_SETGID)) { ++ if (gr_check_group_change(-1, -1, gid)) ++ goto error; ++ + if (gid != old_fsgid) { + new->fsgid = gid; + goto change_okay; +@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, + error = get_dumpable(me->mm); + break; + case PR_SET_DUMPABLE: +- if (arg2 < 0 || arg2 > 1) { ++ if (arg2 > 1) { + error = -EINVAL; + break; + } +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index b8bd058..ab6a76be 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -63,6 +63,13 @@ + static int deprecated_sysctl_warning(struct __sysctl_args *args); + + #if defined(CONFIG_SYSCTL) ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++extern __u32 gr_handle_sysctl(const ctl_table *table, const int op); ++extern int gr_handle_sysctl_mod(const char *dirname, const char *name, ++ const int op); ++extern int gr_handle_chroot_sysctl(const int op); + + /* External variables not in a header file. 
*/ + extern int C_A_D; +@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, + static int proc_taint(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + #endif ++extern ctl_table grsecurity_table[]; + + static struct ctl_table root_table[]; + static struct ctl_table_root sysctl_table_root; +@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[]; + int sysctl_legacy_va_layout; + #endif + ++#ifdef CONFIG_PAX_SOFTMODE ++static ctl_table pax_table[] = { ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "softmode", ++ .data = &pax_softmode, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ ++ { .ctl_name = 0 } ++}; ++#endif ++ + extern int prove_locking; + extern int lock_stat; + +@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ + #endif + + static struct ctl_table kern_table[] = { ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "grsecurity", ++ .mode = 0500, ++ .child = grsecurity_table, ++ }, ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "pax", ++ .mode = 0500, ++ .child = pax_table, ++ }, ++#endif ++ + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sched_child_runs_first", +@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = { + .data = &modprobe_path, + .maxlen = KMOD_PATH_LEN, + .mode = 0644, +- .proc_handler = &proc_dostring, +- .strategy = &sysctl_string, ++ .proc_handler = &proc_dostring_modpriv, ++ .strategy = &sysctl_string_modpriv, + }, + { + .ctl_name = CTL_UNNUMBERED, +@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = { + .mode = 0644, + .proc_handler = &proc_dointvec + }, ++ { ++ .procname = "heap_stack_gap", ++ .data = &sysctl_heap_stack_gap, ++ .maxlen = sizeof(sysctl_heap_stack_gap), ++ .mode = 0644, ++ .proc_handler = proc_doulongvec_minmax, ++ }, + #else + { + .ctl_name = CTL_UNNUMBERED, +@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root, + return 0; + } + ++static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op); ++ + static int parse_table(int __user *name, int nlen, + void __user *oldval, size_t __user *oldlenp, + void __user *newval, size_t newlen, +@@ -1821,7 +1871,7 @@ repeat: + if (n == table->ctl_name) { + int error; + if (table->child) { +- if (sysctl_perm(root, table, MAY_EXEC)) ++ if (sysctl_perm_nochk(root, table, MAY_EXEC)) + return -EPERM; + name++; + nlen--; +@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op) + int error; + int mode; + ++ if (table->parent != NULL && table->parent->procname != NULL && ++ table->procname != NULL && ++ gr_handle_sysctl_mod(table->parent->procname, table->procname, op)) ++ return -EACCES; ++ if (gr_handle_chroot_sysctl(op)) ++ return -EACCES; ++ error = gr_handle_sysctl(table, op); ++ if (error) ++ return error; ++ ++ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC)); ++ if (error) ++ return error; ++ ++ if (root->permissions) ++ mode = root->permissions(root, current->nsproxy, table); ++ else ++ mode = table->mode; ++ ++ return test_perm(mode, op); ++} ++ ++int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op) ++{ ++ int error; ++ int mode; ++ + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC)); + if (error) + return error; +@@ -2335,6 +2412,16 @@ int 
proc_dostring(struct ctl_table *table, int write, + buffer, lenp, ppos); + } + ++int proc_dostring_modpriv(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ if (write && !capable(CAP_SYS_MODULE)) ++ return -EPERM; ++ ++ return _proc_do_string(table->data, table->maxlen, write, ++ buffer, lenp, ppos); ++} ++ + + static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp, + int *valp, +@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int + vleft = table->maxlen / sizeof(unsigned long); + left = *lenp; + +- for (; left && vleft--; i++, min++, max++, first=0) { ++ for (; left && vleft--; i++, first=0) { + if (write) { + while (left) { + char c; +@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write, + return -ENOSYS; + } + ++int proc_dostring_modpriv(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ return -ENOSYS; ++} ++ + int proc_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table, + return 1; + } + ++int sysctl_string_modpriv(struct ctl_table *table, ++ void __user *oldval, size_t __user *oldlenp, ++ void __user *newval, size_t newlen) ++{ ++ if (newval && newlen && !capable(CAP_SYS_MODULE)) ++ return -EPERM; ++ ++ return sysctl_string(table, oldval, oldlenp, newval, newlen); ++} ++ + /* + * This function makes sure that all of the integers in the vector + * are between the minimum and maximum values given in the arrays +@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table, + return -ENOSYS; + } + ++int sysctl_string_modpriv(struct ctl_table *table, ++ void __user *oldval, size_t __user *oldlenp, ++ void __user *newval, size_t newlen) ++{ ++ return -ENOSYS; ++} ++ + int sysctl_intvec(struct ctl_table *table, + void __user *oldval, size_t __user *oldlenp, + void __user *newval, size_t newlen) +@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax); + EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); + EXPORT_SYMBOL(proc_dointvec_ms_jiffies); + EXPORT_SYMBOL(proc_dostring); ++EXPORT_SYMBOL(proc_dostring_modpriv); + EXPORT_SYMBOL(proc_doulongvec_minmax); + EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax); + EXPORT_SYMBOL(register_sysctl_table); +@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec); + EXPORT_SYMBOL(sysctl_jiffies); + EXPORT_SYMBOL(sysctl_ms_jiffies); + EXPORT_SYMBOL(sysctl_string); ++EXPORT_SYMBOL(sysctl_string_modpriv); + EXPORT_SYMBOL(sysctl_data); + EXPORT_SYMBOL(unregister_sysctl_table); +diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c +index 469193c..ea3ecb2 100644 +--- a/kernel/sysctl_check.c ++++ b/kernel/sysctl_check.c +@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) + } else { + if ((table->strategy == sysctl_data) || + (table->strategy == sysctl_string) || ++ (table->strategy == sysctl_string_modpriv) || + (table->strategy == sysctl_intvec) || + (table->strategy == sysctl_jiffies) || + (table->strategy == sysctl_ms_jiffies) || + (table->proc_handler == proc_dostring) || ++ (table->proc_handler == proc_dostring_modpriv) || + (table->proc_handler == proc_dointvec) || + (table->proc_handler == proc_dointvec_minmax) || + (table->proc_handler == proc_dointvec_jiffies) || +diff --git a/kernel/taskstats.c b/kernel/taskstats.c +index a4ef542..798bcd7 100644 +--- a/kernel/taskstats.c ++++ b/kernel/taskstats.c +@@ -26,9 +26,12 @@ + 
#include <linux/cgroup.h> + #include <linux/fs.h> + #include <linux/file.h> ++#include <linux/grsecurity.h> + #include <net/genetlink.h> + #include <asm/atomic.h> + ++extern int gr_is_taskstats_denied(int pid); ++ + /* + * Maximum length of a cpumask that can be specified in + * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute +@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) + size_t size; + cpumask_var_t mask; + ++ if (gr_is_taskstats_denied(current->pid)) ++ return -EACCES; ++ + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + +diff --git a/kernel/time.c b/kernel/time.c +index 33df60e..ca768bd 100644 +--- a/kernel/time.c ++++ b/kernel/time.c +@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz) + return error; + + if (tz) { ++ /* we log in do_settimeofday called below, so don't log twice ++ */ ++ if (!tv) ++ gr_log_timechange(); ++ + /* SMP safe, global irq locking makes it work. */ + sys_tz = *tz; + update_vsyscall_tz(); +@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time); + * Avoid unnecessary multiplications/divisions in the + * two most common HZ cases: + */ +-unsigned int inline jiffies_to_msecs(const unsigned long j) ++inline unsigned int jiffies_to_msecs(const unsigned long j) + { + #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j) + } + EXPORT_SYMBOL(jiffies_to_msecs); + +-unsigned int inline jiffies_to_usecs(const unsigned long j) ++inline unsigned int jiffies_to_usecs(const unsigned long j) + { + #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) + return (USEC_PER_SEC / HZ) * j; +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c +index 57b953f..06f149f 100644 +--- a/kernel/time/tick-broadcast.c ++++ b/kernel/time/tick-broadcast.c +@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) + * then clear the broadcast bit. 
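The one-line tick-broadcast change just below is a scoping fix: it removes an inner declaration that shadowed the function's cpu parameter, so the assignment now updates the parameter itself rather than a block-local copy. The bug class in miniature (illustrative, not from the patch):

void f(int cpu)
{
	if (cond) {
		int cpu = smp_processor_id();	/* shadows the parameter */
		use(cpu);			/* sees the inner copy */
	}
	use(cpu);	/* still the caller's value: the shadow never escaped */
}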
+ */ + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { +- int cpu = smp_processor_id(); ++ cpu = smp_processor_id(); + + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); + tick_broadcast_clear_oneshot(cpu); +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index 4a71cff..ffb5548 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -14,6 +14,7 @@ + #include <linux/init.h> + #include <linux/mm.h> + #include <linux/sched.h> ++#include <linux/grsecurity.h> + #include <linux/sysdev.h> + #include <linux/clocksource.h> + #include <linux/jiffies.h> +@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec) + */ + struct timespec ts = xtime; + timespec_add_ns(&ts, nsec); +- ACCESS_ONCE(xtime_cache) = ts; ++ ACCESS_ONCE_RW(xtime_cache) = ts; + } + + /* must hold xtime_lock */ +@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv) + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + ++ gr_log_timechange(); ++ + write_seqlock_irqsave(&xtime_lock, flags); + + timekeeping_forward_now(); +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c +index 54c0dda..e9095d9 100644 +--- a/kernel/time/timer_list.c ++++ b/kernel/time/timer_list.c +@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); + + static void print_name_offset(struct seq_file *m, void *sym) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, "<%p>", NULL); ++#else + char symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name((unsigned long)sym, symname) < 0) + SEQ_printf(m, "<%p>", sym); + else + SEQ_printf(m, "%s", symname); ++#endif + } + + static void +@@ -112,7 +116,11 @@ next_one: + static void + print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, " .base: %p\n", NULL); ++#else + SEQ_printf(m, " .base: %p\n", base); ++#endif + SEQ_printf(m, " .index: %d\n", + base->index); + SEQ_printf(m, " .resolution: %Lu nsecs\n", +@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void) + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops); ++#else + pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c +index ee5681f..634089b 100644 +--- a/kernel/time/timer_stats.c ++++ b/kernel/time/timer_stats.c +@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop; + static unsigned long nr_entries; + static struct entry entries[MAX_ENTRIES]; + +-static atomic_t overflow_count; ++static atomic_unchecked_t overflow_count; + + /* + * The entries are in a hash-table, for fast lookup: +@@ -140,7 +140,7 @@ static void reset_entries(void) + nr_entries = 0; + memset(entries, 0, sizeof(entries)); + memset(tstat_hash_table, 0, sizeof(tstat_hash_table)); +- atomic_set(&overflow_count, 0); ++ atomic_set_unchecked(&overflow_count, 0); + } + + static struct entry *alloc_entry(void) +@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + if (likely(entry)) + entry->count++; + else +- atomic_inc(&overflow_count); ++ atomic_inc_unchecked(&overflow_count); + + out_unlock: + spin_unlock_irqrestore(lock, flags); +@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + + static void print_name_offset(struct seq_file *m, unsigned long addr) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, "<%p>", NULL); ++#else + char 
symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name(addr, symname) < 0) + seq_printf(m, "<%p>", (void *)addr); + else + seq_printf(m, "%s", symname); ++#endif + } + + static int tstats_show(struct seq_file *m, void *v) +@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v) + + seq_puts(m, "Timer Stats Version: v0.2\n"); + seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); +- if (atomic_read(&overflow_count)) ++ if (atomic_read_unchecked(&overflow_count)) + seq_printf(m, "Overflow: %d entries\n", +- atomic_read(&overflow_count)); ++ atomic_read_unchecked(&overflow_count)); + + for (i = 0; i < nr_entries; i++) { + entry = entries + i; +@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void) + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops); ++#else + pe = proc_create("timer_stats", 0644, NULL, &tstats_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff --git a/kernel/timer.c b/kernel/timer.c +index cb3c1f1..8bf5526 100644 +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick) + /* + * This function runs timers and the timer-tq in bottom half context. + */ +-static void run_timer_softirq(struct softirq_action *h) ++static void run_timer_softirq(void) + { + struct tvec_base *base = __get_cpu_var(tvec_bases); + +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index d9d6206..f19467e 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, + struct blk_trace *bt = filp->private_data; + char buf[16]; + +- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); ++ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped)); + + return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + } +@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, + return 1; + + bt = buf->chan->private_data; +- atomic_inc(&bt->dropped); ++ atomic_inc_unchecked(&bt->dropped); + return 0; + } + +@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + + bt->dir = dir; + bt->dev = dev; +- atomic_set(&bt->dropped, 0); ++ atomic_set_unchecked(&bt->dropped, 0); + + ret = -EIO; + bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 4872937..c794d40 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) + + ip = rec->ip; + ++ ret = ftrace_arch_code_modify_prepare(); ++ FTRACE_WARN_ON(ret); ++ if (ret) ++ return 0; ++ + ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); ++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process()); + if (ret) { + ftrace_bug(ret, ip); + rec->flags |= FTRACE_FL_FAILED; +- return 0; + } +- return 1; ++ return ret ? 0 : 1; + } + + /* +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index e749a05..19c6e94 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list) + * the reader page). But if the next page is a header page, + * its flags will be non zero. 
+ */ +-static int inline ++static inline int + rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, + struct buffer_page *page, struct list_head *list) + { +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index a2a2d1f..7f32b09 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, + size_t rem; + unsigned int i; + ++ pax_track_stack(); ++ + /* copy the tracer to avoid using a global lock all around */ + mutex_lock(&trace_types_lock); + if (unlikely(old_tracer != current_trace && current_trace)) { +@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + int entries, size, i; + size_t ret; + ++ pax_track_stack(); ++ + if (*ppos & (PAGE_SIZE - 1)) { + WARN_ONCE(1, "Ftrace: previous read must page-align\n"); + return -EINVAL; +@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = { + }; + #endif + +-static struct dentry *d_tracer; +- + struct dentry *tracing_init_dentry(void) + { ++ static struct dentry *d_tracer; + static int once; + + if (d_tracer) +@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void) + return d_tracer; + } + +-static struct dentry *d_percpu; +- + struct dentry *tracing_dentry_percpu(void) + { ++ static struct dentry *d_percpu; + static int once; + struct dentry *d_tracer; + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index d128f65..f37b4af 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list); + * Modules must own their file_operations to keep up with + * reference counting. + */ ++ + struct ftrace_module_file_ops { + struct list_head list; + struct module *mod; +- struct file_operations id; +- struct file_operations enable; +- struct file_operations format; +- struct file_operations filter; + }; + + static void remove_subsystem_dir(const char *name) +@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod) + + file_ops->mod = mod; + +- file_ops->id = ftrace_event_id_fops; +- file_ops->id.owner = mod; +- +- file_ops->enable = ftrace_enable_fops; +- file_ops->enable.owner = mod; +- +- file_ops->filter = ftrace_event_filter_fops; +- file_ops->filter.owner = mod; +- +- file_ops->format = ftrace_event_format_fops; +- file_ops->format.owner = mod; ++ pax_open_kernel(); ++ *(void **)&mod->trace_id.owner = mod; ++ *(void **)&mod->trace_enable.owner = mod; ++ *(void **)&mod->trace_filter.owner = mod; ++ *(void **)&mod->trace_format.owner = mod; ++ pax_close_kernel(); + + list_add(&file_ops->list, &ftrace_module_file_list); + +@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod) + call->mod = mod; + list_add(&call->list, &ftrace_events); + event_create_dir(call, d_events, +- &file_ops->id, &file_ops->enable, +- &file_ops->filter, &file_ops->format); ++ &mod->trace_id, &mod->trace_enable, ++ &mod->trace_filter, &mod->trace_format); + } + } + +diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c +index 0acd834..b800b56 100644 +--- a/kernel/trace/trace_mmiotrace.c ++++ b/kernel/trace/trace_mmiotrace.c +@@ -23,7 +23,7 @@ struct header_iter { + static struct trace_array *mmio_trace_array; + static bool overrun_detected; + static unsigned long prev_overruns; +-static atomic_t dropped_count; ++static atomic_unchecked_t dropped_count; + + static void mmio_reset_data(struct trace_array *tr) + { +@@ -126,7 +126,7 @@ static void mmio_close(struct 
trace_iterator *iter) + + static unsigned long count_overruns(struct trace_iterator *iter) + { +- unsigned long cnt = atomic_xchg(&dropped_count, 0); ++ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0); + unsigned long over = ring_buffer_overruns(iter->tr->buffer); + + if (over > prev_overruns) +@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, + sizeof(*entry), 0, pc); + if (!event) { +- atomic_inc(&dropped_count); ++ atomic_inc_unchecked(&dropped_count); + return; + } + entry = ring_buffer_event_data(event); +@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, + sizeof(*entry), 0, pc); + if (!event) { +- atomic_inc(&dropped_count); ++ atomic_inc_unchecked(&dropped_count); + return; + } + entry = ring_buffer_event_data(event); +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c +index b6c12c6..41fdc53 100644 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path) + return 0; + p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); + if (!IS_ERR(p)) { +- p = mangle_path(s->buffer + s->len, p, "\n"); ++ p = mangle_path(s->buffer + s->len, p, "\n\\"); + if (p) { + s->len = p - s->buffer; + return 1; +diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c +index 8504ac7..ecf0adb 100644 +--- a/kernel/trace/trace_stack.c ++++ b/kernel/trace/trace_stack.c +@@ -50,7 +50,7 @@ static inline void check_stack(void) + return; + + /* we do not handle interrupt stacks yet */ +- if (!object_is_on_stack(&this_size)) ++ if (!object_starts_on_stack(&this_size)) + return; + + local_irq_save(flags); +diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c +index 40cafb0..d5ead43 100644 +--- a/kernel/trace/trace_workqueue.c ++++ b/kernel/trace/trace_workqueue.c +@@ -21,7 +21,7 @@ struct cpu_workqueue_stats { + int cpu; + pid_t pid; + /* Can be inserted from interrupt or user context, need to be atomic */ +- atomic_t inserted; ++ atomic_unchecked_t inserted; + /* + * Don't need to be atomic, works are serialized in a single workqueue thread + * on a single CPU. 
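The object_is_on_stack() call sites in this section (kernel/trace/trace_stack.c above, lib/debugobjects.c and lib/dma-debug.c below) are renamed to object_starts_on_stack(), making explicit that only the object's start address is tested; the object may still run off the end of the stack. A sketch of the check, assuming it keeps the generic form:

static inline int object_starts_on_stack(const void *obj)
{
	const void *stack = task_stack_page(current);

	return stack <= obj && obj < stack + THREAD_SIZE;
}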
+@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread, + spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); + list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { + if (node->pid == wq_thread->pid) { +- atomic_inc(&node->inserted); ++ atomic_inc_unchecked(&node->inserted); + goto found; + } + } +@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p) + tsk = get_pid_task(pid, PIDTYPE_PID); + if (tsk) { + seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, +- atomic_read(&cws->inserted), cws->executed, ++ atomic_read_unchecked(&cws->inserted), cws->executed, + tsk->comm); + put_task_struct(tsk); + } +diff --git a/kernel/user.c b/kernel/user.c +index 1b91701..8795237 100644 +--- a/kernel/user.c ++++ b/kernel/user.c +@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) + spin_lock_irq(&uidhash_lock); + up = uid_hash_find(uid, hashent); + if (up) { ++ put_user_ns(ns); + key_put(new->uid_keyring); + key_put(new->session_keyring); + kmem_cache_free(uid_cachep, new); +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 234ceb1..ad74049 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -905,7 +905,7 @@ config LATENCYTOP + select STACKTRACE + select SCHEDSTATS + select SCHED_DEBUG +- depends on HAVE_LATENCYTOP_SUPPORT ++ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM + help + Enable this option if you want to use the LatencyTOP tool + to find out which userspace is blocking on what kernel operations. +diff --git a/lib/bitmap.c b/lib/bitmap.c +index 7025658..8d14cab 100644 +--- a/lib/bitmap.c ++++ b/lib/bitmap.c +@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, + { + int c, old_c, totaldigits, ndigits, nchunks, nbits; + u32 chunk; +- const char __user *ubuf = buf; ++ const char __user *ubuf = (const char __force_user *)buf; + + bitmap_zero(maskp, nmaskbits); + +@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf, + { + if (!access_ok(VERIFY_READ, ubuf, ulen)) + return -EFAULT; +- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits); ++ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits); + } + EXPORT_SYMBOL(bitmap_parse_user); + +diff --git a/lib/bug.c b/lib/bug.c +index 300e41a..2779eb0 100644 +--- a/lib/bug.c ++++ b/lib/bug.c +@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) + return BUG_TRAP_TYPE_NONE; + + bug = find_bug(bugaddr); ++ if (!bug) ++ return BUG_TRAP_TYPE_NONE; + + printk(KERN_EMERG "------------[ cut here ]------------\n"); + +diff --git a/lib/debugobjects.c b/lib/debugobjects.c +index 2b413db..e21d207 100644 +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c +@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack) + if (limit > 4) + return; + +- is_on_stack = object_is_on_stack(addr); ++ is_on_stack = object_starts_on_stack(addr); + if (is_on_stack == onstack) + return; + +diff --git a/lib/devres.c b/lib/devres.c +index 72c8909..7543868 100644 +--- a/lib/devres.c ++++ b/lib/devres.c +@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr) + { + iounmap(addr); + WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, +- (void *)addr)); ++ (void __force *)addr)); + } + EXPORT_SYMBOL(devm_iounmap); + +@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) + { + ioport_unmap(addr); + WARN_ON(devres_destroy(dev, devm_ioport_map_release, +- 
devm_ioport_map_match, (void *)addr)); ++ devm_ioport_map_match, (void __force *)addr)); + } + EXPORT_SYMBOL(devm_ioport_unmap); + +diff --git a/lib/dma-debug.c b/lib/dma-debug.c +index 084e879..0674448 100644 +--- a/lib/dma-debug.c ++++ b/lib/dma-debug.c +@@ -861,7 +861,7 @@ out: + + static void check_for_stack(struct device *dev, void *addr) + { +- if (object_is_on_stack(addr)) ++ if (object_starts_on_stack(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from" + "stack [addr=%p]\n", addr); + } +diff --git a/lib/idr.c b/lib/idr.c +index eda7ba3..915dfae 100644 +--- a/lib/idr.c ++++ b/lib/idr.c +@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) + id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; + + /* if already at the top layer, we need to grow */ +- if (id >= 1 << (idp->layers * IDR_BITS)) { ++ if (id >= (1 << (idp->layers * IDR_BITS))) { + *starting_id = id; + return IDR_NEED_TO_GROW; + } +diff --git a/lib/inflate.c b/lib/inflate.c +index d102559..4215f31 100644 +--- a/lib/inflate.c ++++ b/lib/inflate.c +@@ -266,7 +266,7 @@ static void free(void *where) + malloc_ptr = free_mem_ptr; + } + #else +-#define malloc(a) kmalloc(a, GFP_KERNEL) ++#define malloc(a) kmalloc((a), GFP_KERNEL) + #define free(a) kfree(a) + #endif + +diff --git a/lib/ioremap.c b/lib/ioremap.c +index 14c6078..65526a1 100644 +--- a/lib/ioremap.c ++++ b/lib/ioremap.c +@@ -37,7 +37,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, + unsigned long next; + + phys_addr -= addr; +- pmd = pmd_alloc(&init_mm, pud, addr); ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -55,7 +55,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, + unsigned long next; + + phys_addr -= addr; +- pud = pud_alloc(&init_mm, pgd, addr); ++ pud = pud_alloc_kernel(&init_mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { +diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c +index bd2bea9..6b3c95e 100644 +--- a/lib/is_single_threaded.c ++++ b/lib/is_single_threaded.c +@@ -22,6 +22,9 @@ bool current_is_single_threaded(void) + struct task_struct *p, *t; + bool ret; + ++ if (!mm) ++ return true; ++ + if (atomic_read(&task->signal->live) != 1) + return false; + +diff --git a/lib/kobject.c b/lib/kobject.c +index b512b74..8115eb1 100644 +--- a/lib/kobject.c ++++ b/lib/kobject.c +@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr, + return ret; + } + +-struct sysfs_ops kobj_sysfs_ops = { ++const struct sysfs_ops kobj_sysfs_ops = { + .show = kobj_attr_show, + .store = kobj_attr_store, + }; +@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = { + * If the kset was not able to be created, NULL will be returned. + */ + static struct kset *kset_create(const char *name, +- struct kset_uevent_ops *uevent_ops, ++ const struct kset_uevent_ops *uevent_ops, + struct kobject *parent_kobj) + { + struct kset *kset; +@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name, + * If the kset was not able to be created, NULL will be returned. 
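The const qualifiers added around here (sysfs_ops, kset_uevent_ops) belong to the constification effort seen throughout the patch: once an ops table is const, the compiler places it in .rodata, where KERNEXEC keeps its function pointers from being overwritten at runtime. Declaring an ops table accordingly (illustrative names):

static const struct sysfs_ops example_sysfs_ops = {	/* lands in .rodata */
	.show	= example_show,
	.store	= example_store,
};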
+ */ + struct kset *kset_create_and_add(const char *name, +- struct kset_uevent_ops *uevent_ops, ++ const struct kset_uevent_ops *uevent_ops, + struct kobject *parent_kobj) + { + struct kset *kset; +diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c +index 507b821..0bf8ed0 100644 +--- a/lib/kobject_uevent.c ++++ b/lib/kobject_uevent.c +@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, + const char *subsystem; + struct kobject *top_kobj; + struct kset *kset; +- struct kset_uevent_ops *uevent_ops; ++ const struct kset_uevent_ops *uevent_ops; + u64 seq; + int i = 0; + int retval = 0; +diff --git a/lib/kref.c b/lib/kref.c +index 9ecd6e8..12c94c1 100644 +--- a/lib/kref.c ++++ b/lib/kref.c +@@ -61,7 +61,7 @@ void kref_get(struct kref *kref) + */ + int kref_put(struct kref *kref, void (*release)(struct kref *kref)) + { +- WARN_ON(release == NULL); ++ BUG_ON(release == NULL); + WARN_ON(release == (void (*)(struct kref *))kfree); + + if (atomic_dec_and_test(&kref->refcount)) { +diff --git a/lib/radix-tree.c b/lib/radix-tree.c +index 92cdd99..a8149d7 100644 +--- a/lib/radix-tree.c ++++ b/lib/radix-tree.c +@@ -81,7 +81,7 @@ struct radix_tree_preload { + int nr; + struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; + }; +-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; ++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); + + static inline gfp_t root_gfp_mask(struct radix_tree_root *root) + { +diff --git a/lib/random32.c b/lib/random32.c +index 217d5c4..45aba8a 100644 +--- a/lib/random32.c ++++ b/lib/random32.c +@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state) + */ + static inline u32 __seed(u32 x, u32 m) + { +- return (x < m) ? x + m : x; ++ return (x <= m) ? 
x + m + 1 : x; + } + + /** +diff --git a/lib/vsprintf.c b/lib/vsprintf.c +index 33bed5e..1477e46 100644 +--- a/lib/vsprintf.c ++++ b/lib/vsprintf.c +@@ -16,6 +16,9 @@ + * - scnprintf and vscnprintf + */ + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <stdarg.h> + #include <linux/module.h> + #include <linux/types.h> +@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num, + return buf; + } + +-static char *string(char *buf, char *end, char *s, struct printf_spec spec) ++static char *string(char *buf, char *end, const char *s, struct printf_spec spec) + { + int len, i; + + if ((unsigned long)s < PAGE_SIZE) +- s = "<NULL>"; ++ s = "(null)"; + + len = strnlen(s, spec.precision); + +@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr, + unsigned long value = (unsigned long) ptr; + #ifdef CONFIG_KALLSYMS + char sym[KSYM_SYMBOL_LEN]; +- if (ext != 'f' && ext != 's') ++ if (ext != 'f' && ext != 's' && ext != 'a') + sprint_symbol(sym, value); + else + kallsyms_lookup(value, NULL, NULL, NULL, sym); +@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, + * - 'f' For simple symbolic function names without offset + * - 'S' For symbolic direct pointers with offset + * - 's' For symbolic direct pointers without offset ++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM ++ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM + * - 'R' For a struct resource pointer, it prints the range of + * addresses (not the name nor the flags) + * - 'M' For a 6-byte MAC address, it prints the address in the +@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, + struct printf_spec spec) + { + if (!ptr) +- return string(buf, end, "(null)", spec); ++ return string(buf, end, "(nil)", spec); + + switch (*fmt) { + case 'F': +@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, + case 's': + /* Fallthrough */ + case 'S': ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ break; ++#else ++ return symbol_string(buf, end, ptr, spec, *fmt); ++#endif ++ case 'a': ++ /* Fallthrough */ ++ case 'A': + return symbol_string(buf, end, ptr, spec, *fmt); + case 'R': + return resource_string(buf, end, ptr, spec); +@@ -1445,7 +1458,7 @@ do { \ + size_t len; + if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE + || (unsigned long)save_str < PAGE_SIZE) +- save_str = "<NULL>"; ++ save_str = "(null)"; + len = strlen(save_str); + if (str + len + 1 < end) + memcpy(str, save_str, len + 1); +@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) + typeof(type) value; \ + if (sizeof(type) == 8) { \ + args = PTR_ALIGN(args, sizeof(u32)); \ +- *(u32 *)&value = *(u32 *)args; \ +- *((u32 *)&value + 1) = *(u32 *)(args + 4); \ ++ *(u32 *)&value = *(const u32 *)args; \ ++ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \ + } else { \ + args = PTR_ALIGN(args, sizeof(type)); \ +- value = *(typeof(type) *)args; \ ++ value = *(const typeof(type) *)args; \ + } \ + args += sizeof(type); \ + value; \ +@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) + const char *str_arg = args; + size_t len = strlen(str_arg); + args += len + 1; +- str = string(str, end, (char *)str_arg, spec); ++ str = string(str, end, str_arg, spec); + break; + } + +diff --git a/localversion-grsec b/localversion-grsec 
+new file mode 100644 +index 0000000..7cd6065 +--- /dev/null ++++ b/localversion-grsec +@@ -0,0 +1 @@ ++-grsec +diff --git a/mm/Kconfig b/mm/Kconfig +index 2c19c0b..f3c3f83 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -228,7 +228,7 @@ config KSM + config DEFAULT_MMAP_MIN_ADDR + int "Low address space to protect from user allocation" + depends on MMU +- default 4096 ++ default 65536 + help + This is the portion of low virtual memory which should be protected + from userspace allocation. Keeping a user from writing to low pages +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index d824401..9f5244a 100644 +--- a/mm/backing-dev.c ++++ b/mm/backing-dev.c +@@ -271,7 +271,7 @@ static void bdi_task_init(struct backing_dev_info *bdi, + list_add_tail_rcu(&wb->list, &bdi->wb_list); + spin_unlock(&bdi->wb_lock); + +- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE; ++ tsk->flags |= PF_SWAPWRITE; + set_freezable(); + + /* +@@ -489,7 +489,7 @@ static void bdi_add_to_pending(struct rcu_head *head) + * Add the default flusher task that gets created for any bdi + * that has dirty data pending writeout + */ +-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi) ++static void bdi_add_default_flusher_task(struct backing_dev_info *bdi) + { + if (!bdi_cap_writeback_dirty(bdi)) + return; +diff --git a/mm/filemap.c b/mm/filemap.c +index a1fe378..e26702f 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma) + struct address_space *mapping = file->f_mapping; + + if (!mapping->a_ops->readpage) +- return -ENOEXEC; ++ return -ENODEV; + file_accessed(file); + vma->vm_ops = &generic_file_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; +@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i + *pos = i_size_read(inode); + + if (limit != RLIM_INFINITY) { ++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0); + if (*pos >= limit) { + send_sig(SIGXFSZ, current, 0); + return -EFBIG; +diff --git a/mm/fremap.c b/mm/fremap.c +index b6ec85a..a24ac22 100644 +--- a/mm/fremap.c ++++ b/mm/fremap.c +@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, + retry: + vma = find_vma(mm, start); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC)) ++ goto out; ++#endif ++ + /* + * Make sure the vma is shared, that it supports prefaulting, + * and that the remapped range is valid and fully within +@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, + /* + * drop PG_Mlocked flag for over-mapped range + */ +- unsigned int saved_flags = vma->vm_flags; ++ unsigned long saved_flags = vma->vm_flags; + munlock_vma_pages_range(vma, start, start + size); + vma->vm_flags = saved_flags; + } +diff --git a/mm/highmem.c b/mm/highmem.c +index 9c1e627..5ca9447 100644 +--- a/mm/highmem.c ++++ b/mm/highmem.c +@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void) + * So no dangers, even with speculative execution. 
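The pkmap page-table updates just below run between pax_open_kernel() and pax_close_kernel(), the same write-window pairing used for softirq_vec earlier in this section. On x86 the window is a CR0.WP toggle; a sketch under that assumption (other architectures implement it differently):

static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: writes to RO pages allowed */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;
}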
+ */ + page = pte_page(pkmap_page_table[i]); ++ pax_open_kernel(); + pte_clear(&init_mm, (unsigned long)page_address(page), + &pkmap_page_table[i]); +- ++ pax_close_kernel(); + set_page_address(page, NULL); + need_flush = 1; + } +@@ -177,9 +178,11 @@ start: + } + } + vaddr = PKMAP_ADDR(last_pkmap_nr); ++ ++ pax_open_kernel(); + set_pte_at(&init_mm, vaddr, + &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); +- ++ pax_close_kernel(); + pkmap_count[last_pkmap_nr] = 1; + set_page_address(page, (void *)vaddr); + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 5e1e508..f6cc035 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1694,6 +1694,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) + kref_get(&reservations->refs); + } + ++static void resv_map_put(struct vm_area_struct *vma) ++{ ++ struct resv_map *reservations = vma_resv_map(vma); ++ ++ if (!reservations) ++ return; ++ kref_put(&reservations->refs, resv_map_release); ++} ++ + static void hugetlb_vm_op_close(struct vm_area_struct *vma) + { + struct hstate *h = hstate_vma(vma); +@@ -1709,7 +1718,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) + reserve = (end - start) - + region_count(&reservations->regions, start, end); + +- kref_put(&reservations->refs, resv_map_release); ++ resv_map_put(vma); + + if (reserve) { + hugetlb_acct_memory(h, -reserve); +@@ -1933,6 +1942,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, + return 1; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ struct vm_area_struct *vma_m; ++ unsigned long address_m; ++ pte_t *ptep_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK); ++ get_page(page_m); ++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0)); ++} ++#endif ++ + static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, pte_t pte, + struct page *pagecache_page) +@@ -2004,6 +2033,11 @@ retry_avoidcopy: + huge_ptep_clear_flush(vma, address, ptep); + set_huge_pte_at(mm, address, ptep, + make_huge_pte(vma, new_page, 1)); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, new_page); ++#endif ++ + /* Make the old page be freed below */ + new_page = old_page; + } +@@ -2135,6 +2169,10 @@ retry: + && (vma->vm_flags & VM_SHARED))); + set_huge_pte_at(mm, address, ptep, new_pte); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, page); ++#endif ++ + if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { + /* Optimization, do the COW without a second fault */ + ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); +@@ -2163,6 +2201,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + static DEFINE_MUTEX(hugetlb_instantiation_mutex); + struct hstate *h = hstate_vma(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ h = hstate_vma(vma); ++ } else ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h))) ++ return VM_FAULT_OOM; ++ address_m 
&= HPAGE_MASK; ++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL); ++ } ++#endif ++ + ptep = huge_pte_alloc(mm, address, huge_page_size(h)); + if (!ptep) + return VM_FAULT_OOM; +@@ -2392,12 +2452,16 @@ int hugetlb_reserve_pages(struct inode *inode, + set_vma_resv_flags(vma, HPAGE_RESV_OWNER); + } + +- if (chg < 0) +- return chg; ++ if (chg < 0) { ++ ret = chg; ++ goto out_err; ++ } + + /* There must be enough filesystem quota for the mapping */ +- if (hugetlb_get_quota(inode->i_mapping, chg)) +- return -ENOSPC; ++ if (hugetlb_get_quota(inode->i_mapping, chg)) { ++ ret = -ENOSPC; ++ goto out_err; ++ } + + /* + * Check enough hugepages are available for the reservation. +@@ -2406,7 +2470,7 @@ int hugetlb_reserve_pages(struct inode *inode, + ret = hugetlb_acct_memory(h, chg); + if (ret < 0) { + hugetlb_put_quota(inode->i_mapping, chg); +- return ret; ++ goto out_err; + } + + /* +@@ -2423,6 +2487,9 @@ int hugetlb_reserve_pages(struct inode *inode, + if (!vma || vma->vm_flags & VM_MAYSHARE) + region_add(&inode->i_mapping->private_list, from, to); + return 0; ++out_err: ++ resv_map_put(vma); ++ return ret; + } + + void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) +diff --git a/mm/internal.h b/mm/internal.h +index f03e8e2..7354343 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page); + * in mm/page_alloc.c + */ + extern void __free_pages_bootmem(struct page *page, unsigned int order); ++extern void free_compound_page(struct page *page); + extern void prep_compound_page(struct page *page, unsigned long order); + + +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index c346660..b47382f 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq, + + for (i = 0; i < object->trace_len; i++) { + void *ptr = (void *)object->trace[i]; +- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); ++ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr); + } + } + +diff --git a/mm/maccess.c b/mm/maccess.c +index 9073695..1127f348 100644 +--- a/mm/maccess.c ++++ b/mm/maccess.c +@@ -14,7 +14,7 @@ + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ +-long probe_kernel_read(void *dst, void *src, size_t size) ++long probe_kernel_read(void *dst, const void *src, size_t size) + { + long ret; + mm_segment_t old_fs = get_fs(); +@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size) + set_fs(KERNEL_DS); + pagefault_disable(); + ret = __copy_from_user_inatomic(dst, +- (__force const void __user *)src, size); ++ (const void __force_user *)src, size); + pagefault_enable(); + set_fs(old_fs); + +@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read); + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size) ++long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size) + { + long ret; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + pagefault_disable(); +- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); ++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); + pagefault_enable(); + set_fs(old_fs); + +diff --git a/mm/madvise.c b/mm/madvise.c +index 35b1479..499f7d4 100644 +--- a/mm/madvise.c ++++ b/mm/madvise.c +@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma, + pgoff_t pgoff; + unsigned long new_flags = vma->vm_flags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + switch (behavior) { + case MADV_NORMAL: + new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; +@@ -103,6 +107,13 @@ success: + /* + * vm_flags is protected by the mmap_sem held in write mode. + */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) ++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT); ++#endif ++ + vma->vm_flags = new_flags; + + out: +@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma, + struct vm_area_struct ** prev, + unsigned long start, unsigned long end) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + *prev = vma; + if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) + return -EINVAL; +@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma, + zap_page_range(vma, start, end - start, &details); + } else + zap_page_range(vma, start, end - start, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) { ++ struct zap_details details = { ++ .nonlinear_vma = vma_m, ++ .last_index = ULONG_MAX, ++ }; ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details); ++ } else ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL); ++ } ++#endif ++ + return 0; + } + +@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) + if (end < start) + goto out; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ goto out; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ goto out; ++ + error = 0; + if (end == start) + goto out; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 8aeba53..b4a4198 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0; + + int sysctl_memory_failure_recovery __read_mostly = 1; + +-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); ++atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); + + /* + * Send all the processes who have the page mapped an ``action optional'' +@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, + si.si_signo = SIGBUS; + si.si_errno = 0; + si.si_code = BUS_MCEERR_AO; +- si.si_addr = (void *)addr; ++ si.si_addr = (void __user *)addr; + #ifdef __ARCH_SI_TRAPNO + si.si_trapno = trapno; + #endif +@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref) + return 0; + } + +- atomic_long_add(1, &mce_bad_pages); ++ atomic_long_add_unchecked(1, &mce_bad_pages); + + /* + * We need/can do nothing about count=0 pages. 
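The mm/memory.c hunks that follow (together with the hugetlb changes above) wire PaX SEGMEXEC page mirroring into the fault paths: every executable mapping in the lower half of the split address space gets a mirror VMA at the same offset plus SEGMEXEC_TASK_SIZE, and the pax_mirror_*_pte() helpers keep the mirror's PTE in sync with the lower mapping. A minimal userspace sketch of just the mirror-address arithmetic; the constant and the sample address below are illustrative stand-ins, not the kernel's real values:

#include <assert.h>
#include <stdio.h>

/* Illustrative placeholder; PaX derives the real value from the
 * user address-space split (half of TASK_SIZE on i386). */
#define SEGMEXEC_TASK_SIZE	(1UL << 30)

/* The executable (mirror) view of a lower-half user address. */
static unsigned long mirror_address(unsigned long address)
{
	assert(address < SEGMEXEC_TASK_SIZE);
	return address + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	unsigned long addr = 0x08048000UL;	/* typical ELF text address */

	printf("data view: %#lx, exec view: %#lx\n", addr, mirror_address(addr));
	return 0;
}

The same address_m = address + SEGMEXEC_TASK_SIZE computation recurs in pax_mirror_anon_pte(), pax_mirror_file_pte(), pax_mirror_pfn_pte() and the hugetlb_fault()/handle_mm_fault() entry points below.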
+diff --git a/mm/memory.c b/mm/memory.c +index 6c836d3..b2296e1 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + return; + + pmd = pmd_offset(pud, start); ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD) + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); ++#endif ++ + } + + static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, +@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, + if (end - 1 > ceiling - 1) + return; + ++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD) + pud = pud_offset(pgd, start); + pgd_clear(pgd); + pud_free_tlb(tlb, pud, start); ++#endif ++ + } + + /* +@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); + i = 0; + +- do { ++ while (nr_pages) { + struct vm_area_struct *vma; + +- vma = find_extend_vma(mm, start); ++ vma = find_vma(mm, start); + if (!vma && in_gate_area(tsk, start)) { + unsigned long pg = start & PAGE_MASK; + struct vm_area_struct *gate_vma = get_gate_vma(tsk); +@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + continue; + } + +- if (!vma || ++ if (!vma || start < vma->vm_start || + (vma->vm_flags & (VM_IO | VM_PFNMAP)) || + !(vm_flags & vma->vm_flags)) + return i ? : -EFAULT; +@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + start += PAGE_SIZE; + nr_pages--; + } while (nr_pages && start < vma->vm_end); +- } while (nr_pages); ++ } + return i; + } + +@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, + page_add_file_rmap(page); + set_pte_at(mm, addr, pte, mk_pte(page, prot)); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_file_pte(vma, addr, page, ptl); ++#endif ++ + retval = 0; + pte_unmap_unlock(pte, ptl); + return retval; +@@ -1560,10 +1571,22 @@ out: + int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, + struct page *page) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; + if (!page_count(page)) + return -EINVAL; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) ++ vma_m->vm_flags |= VM_INSERTPAGE; ++#endif ++ + vma->vm_flags |= VM_INSERTPAGE; + return insert_page(vma, addr, page, vma->vm_page_prot); + } +@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) + { + BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); ++ BUG_ON(vma->vm_mirror); + + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; +@@ -1855,7 +1879,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, + + BUG_ON(pud_huge(*pud)); + +- pmd = pmd_alloc(mm, pud, addr); ++ pmd = (mm == &init_mm) ? ++ pmd_alloc_kernel(mm, pud, addr) : ++ pmd_alloc(mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -1875,7 +1901,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long next; + int err; + +- pud = pud_alloc(mm, pgd, addr); ++ pud = (mm == &init_mm) ? 
++ pud_alloc_kernel(mm, pgd, addr) : ++ pud_alloc(mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { +@@ -1977,6 +2005,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo + copy_user_highpage(dst, src, va, vma); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ spinlock_t *ptl; ++ pte_t *pte, entry; ++ ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl); ++ entry = *pte; ++ if (!pte_present(entry)) { ++ if (!pte_none(entry)) { ++ BUG_ON(pte_file(entry)); ++ free_swap_and_cache(pte_to_swp_entry(entry)); ++ pte_clear_not_present_full(mm, address, pte, 0); ++ } ++ } else { ++ struct page *page; ++ ++ flush_cache_page(vma, address, pte_pfn(entry)); ++ entry = ptep_clear_flush(vma, address, pte); ++ BUG_ON(pte_dirty(entry)); ++ page = vm_normal_page(vma, address, entry); ++ if (page) { ++ update_hiwater_rss(mm); ++ if (PageAnon(page)) ++ dec_mm_counter(mm, anon_rss); ++ else ++ dec_mm_counter(mm, file_rss); ++ page_remove_rmap(page); ++ page_cache_release(page); ++ } ++ } ++ pte_unmap_unlock(pte, ptl); ++} ++ ++/* PaX: if vma is mirrored, synchronize the mirror's PTE ++ * ++ * the ptl of the lower mapped page is held on entry and is not released on exit ++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc) ++ */ ++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || !PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(!PageLocked(page_m)); ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map_nested(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ page_cache_get(page_m); ++ page_add_anon_rmap(page_m, vma_m, address_m); ++ inc_mm_counter(mm, anon_rss); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap_nested(pte_m); ++ unlock_page(page_m); ++} ++ ++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map_nested(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ page_cache_get(page_m); ++ page_add_file_rmap(page_m); ++ inc_mm_counter(mm, file_rss); ++ 
set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap_nested(pte_m); ++} ++ ++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map_nested(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap_nested(pte_m); ++} ++ ++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl) ++{ ++ struct page *page_m; ++ pte_t entry; ++ ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC)) ++ goto out; ++ ++ entry = *pte; ++ page_m = vm_normal_page(vma, address, entry); ++ if (!page_m) ++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl); ++ else if (PageAnon(page_m)) { ++ if (pax_find_mirror_vma(vma)) { ++ pte_unmap_unlock(pte, ptl); ++ lock_page(page_m); ++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl); ++ if (pte_same(entry, *pte)) ++ pax_mirror_anon_pte(vma, address, page_m, ptl); ++ else ++ unlock_page(page_m); ++ } ++ } else ++ pax_mirror_file_pte(vma, address, page_m, ptl); ++ ++out: ++ pte_unmap_unlock(pte, ptl); ++} ++#endif ++ + /* + * This routine handles present pages, when users try to write + * to a shared page. It is done by copying the page to a new address +@@ -2156,6 +2364,12 @@ gotten: + */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(new_page)); ++#endif ++ + if (old_page) { + if (!PageAnon(old_page)) { + dec_mm_counter(mm, file_rss); +@@ -2207,6 +2421,10 @@ gotten: + page_remove_rmap(old_page); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, new_page, ptl); ++#endif ++ + /* Free the old page.. */ + new_page = old_page; + ret |= VM_FAULT_WRITE; +@@ -2606,6 +2824,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + swap_free(entry); + if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + try_to_free_swap(page); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma)) ++#endif ++ + unlock_page(page); + + if (flags & FAULT_FLAG_WRITE) { +@@ -2617,6 +2840,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, pte); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + out: +@@ -2632,40 +2860,6 @@ out_release: + } + + /* +- * This is like a special single-page "expand_{down|up}wards()", +- * except we must first make sure that 'address{-|+}PAGE_SIZE' +- * doesn't hit another vma. 
+- */ +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +-{ +- address &= PAGE_MASK; +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { +- struct vm_area_struct *prev = vma->vm_prev; +- +- /* +- * Is there a mapping abutting this one below? +- * +- * That's only ok if it's the same stack mapping +- * that has gotten split.. +- */ +- if (prev && prev->vm_end == address) +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; +- +- expand_stack(vma, address - PAGE_SIZE); +- } +- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { +- struct vm_area_struct *next = vma->vm_next; +- +- /* As VM_GROWSDOWN but s/below/above/ */ +- if (next && next->vm_start == address + PAGE_SIZE) +- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; +- +- expand_upwards(vma, address + PAGE_SIZE); +- } +- return 0; +-} +- +-/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. +@@ -2674,27 +2868,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags) + { +- struct page *page; ++ struct page *page = NULL; + spinlock_t *ptl; + pte_t entry; + +- pte_unmap(page_table); +- +- /* Check if we need to add a guard page to the stack */ +- if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGBUS; +- +- /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + vma->vm_page_prot)); +- page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ ptl = pte_lockptr(mm, pmd); ++ spin_lock(ptl); + if (!pte_none(*page_table)) + goto unlock; + goto setpte; + } + + /* Allocate our own private page. */ ++ pte_unmap(page_table); ++ + if (unlikely(anon_vma_prepare(vma))) + goto oom; + page = alloc_zeroed_user_highpage_movable(vma, address); +@@ -2713,6 +2903,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + if (!pte_none(*page_table)) + goto release; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + inc_mm_counter(mm, anon_rss); + page_add_new_anon_rmap(page, vma, address); + setpte: +@@ -2720,6 +2915,12 @@ setpte: + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, entry); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (page) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + return 0; +@@ -2862,6 +3063,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, + */ + /* Only go through if we didn't race with anybody else... 
*/ + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon && pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + flush_icache_page(vma, page); + entry = mk_pte(page, vma->vm_page_prot); + if (flags & FAULT_FLAG_WRITE) +@@ -2881,6 +3088,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, + + /* no need to invalidate: a not-present page won't be cached */ + update_mmu_cache(vma, address, entry); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++ else ++ pax_mirror_file_pte(vma, address, page, ptl); ++#endif ++ + } else { + if (charged) + mem_cgroup_uncharge_page(page); +@@ -3028,6 +3243,12 @@ static inline int handle_pte_fault(struct mm_struct *mm, + if (flags & FAULT_FLAG_WRITE) + flush_tlb_page(vma, address); + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_pte(vma, address, pte, pmd, ptl); ++ return 0; ++#endif ++ + unlock: + pte_unmap_unlock(pte, ptl); + return 0; +@@ -3044,6 +3265,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + pmd_t *pmd; + pte_t *pte; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + __set_current_state(TASK_RUNNING); + + count_vm_event(PGFAULT); +@@ -3051,6 +3276,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + if (unlikely(is_vm_hugetlb_page(vma))) + return hugetlb_fault(mm, vma, address, flags); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ pgd_t *pgd_m; ++ pud_t *pud_m; ++ pmd_t *pmd_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ } else ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ++ pgd_m = pgd_offset(mm, address_m); ++ pud_m = pud_alloc(mm, pgd_m, address_m); ++ if (!pud_m) ++ return VM_FAULT_OOM; ++ pmd_m = pmd_alloc(mm, pud_m, address_m); ++ if (!pmd_m) ++ return VM_FAULT_OOM; ++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m)) ++ return VM_FAULT_OOM; ++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m); ++ } ++#endif ++ + pgd = pgd_offset(mm, address); + pud = pud_alloc(mm, pgd, address); + if (!pud) +@@ -3086,6 +3339,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) + spin_unlock(&mm->page_table_lock); + return 0; + } ++ ++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address) ++{ ++ pud_t *new = pud_alloc_one(mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ smp_wmb(); /* See comment in __pte_alloc */ ++ ++ spin_lock(&mm->page_table_lock); ++ if (pgd_present(*pgd)) /* Another has populated it */ ++ pud_free(mm, new); ++ else ++ pgd_populate_kernel(mm, pgd, new); ++ spin_unlock(&mm->page_table_lock); ++ return 0; ++} + #endif /* __PAGETABLE_PUD_FOLDED */ + + #ifndef __PAGETABLE_PMD_FOLDED +@@ -3116,6 +3386,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) + spin_unlock(&mm->page_table_lock); + return 0; + } ++ ++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address) ++{ ++ pmd_t *new = pmd_alloc_one(mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ smp_wmb(); /* See comment in __pte_alloc */ ++ ++ spin_lock(&mm->page_table_lock); ++#ifndef __ARCH_HAS_4LEVEL_HACK ++ if (pud_present(*pud)) /* Another has populated it */ ++ pmd_free(mm, new); ++ else ++ pud_populate_kernel(mm, pud, new); ++#else ++ if (pgd_present(*pud)) /* Another has populated it */ ++ pmd_free(mm, new); ++ else 
++ pgd_populate_kernel(mm, pud, new); ++#endif /* __ARCH_HAS_4LEVEL_HACK */ ++ spin_unlock(&mm->page_table_lock); ++ return 0; ++} + #endif /* __PAGETABLE_PMD_FOLDED */ + + int make_pages_present(unsigned long addr, unsigned long end) +@@ -3148,7 +3442,7 @@ static int __init gate_vma_init(void) + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; +- gate_vma.vm_page_prot = __P101; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + /* + * Make sure the vDSO gets into every core dump. + * Dumping its contents makes post-mortem fully interpretable later +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index 3c6e3e2..b1ddbb8 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start, + struct vm_area_struct *next; + int err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + err = 0; + for (; vma && vma->vm_start < end; vma = next) { + next = vma->vm_next; +@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start, + err = policy_vma(vma, new); + if (err) + break; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ err = policy_vma(vma_m, new); ++ if (err) ++ break; ++ } ++#endif ++ + } + return err; + } +@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len, + + if (end < start) + return -EINVAL; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + if (end == start) + return 0; + +@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, + if (!mm) + return -EINVAL; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (mm != current->mm && ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { ++ err = -EPERM; ++ goto out; ++ } ++#endif ++ + /* + * Check if this process has the right to modify the specified + * process. The right exists if the process has administrative +@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, + rcu_read_lock(); + tcred = __task_cred(task); + if (cred->euid != tcred->suid && cred->euid != tcred->uid && +- cred->uid != tcred->suid && cred->uid != tcred->uid && +- !capable(CAP_SYS_NICE)) { ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out; +@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma, + } + #endif + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + /* + * Display pages allocated per node and memory policy via /proc. + */ +@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v) + int n; + char buffer[50]; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("numa_maps"); ++ return 0; ++ } ++#endif ++ + if (!mm) + return 0; + +@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v) + mpol_to_str(buffer, sizeof(buffer), pol, 0); + mpol_cond_put(pol); + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 
0UL : vma->vm_start, buffer); ++#else + seq_printf(m, "%08lx %s", vma->vm_start, buffer); ++#endif + + if (file) { + seq_printf(m, " file="); +- seq_path(m, &file->f_path, "\n\t= "); ++ seq_path(m, &file->f_path, "\n\t\\= "); + } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { + seq_printf(m, " heap"); + } else if (vma->vm_start <= mm->start_stack && +diff --git a/mm/migrate.c b/mm/migrate.c +index aaca868..2ebecdc 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, + unsigned long chunk_start; + int err; + ++ pax_track_stack(); ++ + task_nodes = cpuset_mems_allowed(task); + + err = -ENOMEM; +@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, + if (!mm) + return -EINVAL; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (mm != current->mm && ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { ++ err = -EPERM; ++ goto out; ++ } ++#endif ++ + /* + * Check if this process has the right to modify the specified + * process. The right exists if the process has administrative +@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, + rcu_read_lock(); + tcred = __task_cred(task); + if (cred->euid != tcred->suid && cred->euid != tcred->uid && +- cred->uid != tcred->suid && cred->uid != tcred->uid && +- !capable(CAP_SYS_NICE)) { ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out; +diff --git a/mm/mlock.c b/mm/mlock.c +index 2d846cf..98134d2 100644 +--- a/mm/mlock.c ++++ b/mm/mlock.c +@@ -13,6 +13,7 @@ + #include <linux/pagemap.h> + #include <linux/mempolicy.h> + #include <linux/syscalls.h> ++#include <linux/security.h> + #include <linux/sched.h> + #include <linux/module.h> + #include <linux/rmap.h> +@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page) + } + } + +-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSDOWN) && +- (vma->vm_start == addr) && +- !vma_stack_continue(vma->vm_prev, addr); +-} +- + /** + * __mlock_vma_pages_range() - mlock a range of pages in the vma. + * @vma: target vma +@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, + if (vma->vm_flags & VM_WRITE) + gup_flags |= FOLL_WRITE; + +- /* We don't try to access the guard page of a stack vma */ +- if (stack_guard_page(vma, start)) { +- addr += PAGE_SIZE; +- nr_pages--; +- } +- + while (nr_pages > 0) { + int i; + +@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on) + { + unsigned long nstart, end, tmp; + struct vm_area_struct * vma, * prev; +- int error; ++ int error = -EINVAL; + + len = PAGE_ALIGN(len); + end = start + len; +@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on) + return -EINVAL; + if (end == start) + return 0; ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + vma = find_vma_prev(current->mm, start, &prev); + if (!vma || vma->vm_start > start) + return -ENOMEM; +@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on) + for (nstart = start ; ; ) { + unsigned int newflags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif ++ + /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ + + newflags = vma->vm_flags | VM_LOCKED; +@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) + lock_limit >>= PAGE_SHIFT; + + /* check against resource limits */ ++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1); + if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) + error = do_mlock(start, len, 1); + up_write(&current->mm->mmap_sem); +@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) + static int do_mlockall(int flags) + { + struct vm_area_struct * vma, * prev = NULL; +- unsigned int def_flags = 0; + + if (flags & MCL_FUTURE) +- def_flags = VM_LOCKED; +- current->mm->def_flags = def_flags; ++ current->mm->def_flags |= VM_LOCKED; ++ else ++ current->mm->def_flags &= ~VM_LOCKED; + if (flags == MCL_FUTURE) + goto out; + + for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { +- unsigned int newflags; ++ unsigned long newflags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif ++ ++ BUG_ON(vma->vm_end > TASK_SIZE); + newflags = vma->vm_flags | VM_LOCKED; + if (!(flags & MCL_CURRENT)) + newflags &= ~VM_LOCKED; +@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags) + lock_limit >>= PAGE_SHIFT; + + ret = -ENOMEM; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1); + if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || + capable(CAP_IPC_LOCK)) + ret = do_mlockall(flags); +diff --git a/mm/mmap.c b/mm/mmap.c +index 4b80cbf..f1145be 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -45,6 +45,16 @@ + #define arch_rebalance_pgtables(addr, len) (addr) + #endif + ++static inline void verify_mm_writelocked(struct mm_struct *mm) ++{ ++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX) ++ if (unlikely(down_read_trylock(&mm->mmap_sem))) { ++ up_read(&mm->mmap_sem); ++ BUG(); ++ } ++#endif ++} ++ + static void unmap_region(struct mm_struct *mm, + struct vm_area_struct *vma, struct vm_area_struct *prev, + unsigned long start, unsigned long end); +@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm, + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + */ +-pgprot_t protection_map[16] = { ++pgprot_t protection_map[16] __read_only = { + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 + }; + + pgprot_t vm_get_page_prot(unsigned long vm_flags) + { +- return __pgprot(pgprot_val(protection_map[vm_flags & ++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | + pgprot_val(arch_vm_get_page_prot(vm_flags))); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if (!nx_enabled && ++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC && ++ (vm_flags & (VM_READ | VM_WRITE))) ++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot))))); ++#endif ++ ++ return prot; + } + EXPORT_SYMBOL(vm_get_page_prot); + + int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ + int sysctl_overcommit_ratio = 50; /* default is 50% */ + int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; ++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024; + struct percpu_counter vm_committed_as; + + /* +@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) + struct vm_area_struct *next = vma->vm_next; + + might_sleep(); ++ BUG_ON(vma->vm_mirror); + if 
(vma->vm_ops && vma->vm_ops->close) + vma->vm_ops->close(vma); + if (vma->vm_file) { +@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + * not page aligned -Ram Gupta + */ + rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; ++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1); + if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + + (mm->end_data - mm->start_data) > rlim) + goto out; +@@ -704,6 +726,12 @@ static int + can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { + if (vma->vm_pgoff == vm_pgoff) +@@ -723,6 +751,12 @@ static int + can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { + pgoff_t vm_pglen; +@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, + struct vm_area_struct *vma_merge(struct mm_struct *mm, + struct vm_area_struct *prev, unsigned long addr, + unsigned long end, unsigned long vm_flags, +- struct anon_vma *anon_vma, struct file *file, ++ struct anon_vma *anon_vma, struct file *file, + pgoff_t pgoff, struct mempolicy *policy) + { + pgoff_t pglen = (end - addr) >> PAGE_SHIFT; + struct vm_area_struct *area, *next; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE; ++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL; ++ ++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end); ++#endif ++ + /* + * We later require that vma->vm_flags == vm_flags, + * so this tests vma->vm_flags & VM_SPECIAL, too. +@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + if (next && next->vm_end == end) /* cases 6, 7, 8 */ + next = next->vm_next; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (prev) ++ prev_m = pax_find_mirror_vma(prev); ++ if (area) ++ area_m = pax_find_mirror_vma(area); ++ if (next) ++ next_m = pax_find_mirror_vma(next); ++#endif ++ + /* + * Can it merge with the predecessor? 
+ */ +@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + /* cases 1, 6 */ + vma_adjust(prev, prev->vm_start, + next->vm_end, prev->vm_pgoff, NULL); +- } else /* cases 2, 5, 7 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (prev_m) ++ vma_adjust(prev_m, prev_m->vm_start, ++ next_m->vm_end, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 2, 5, 7 */ + vma_adjust(prev, prev->vm_start, + end, prev->vm_pgoff, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (prev_m) ++ vma_adjust(prev_m, prev_m->vm_start, ++ end_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } + return prev; + } + +@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + mpol_equal(policy, vma_policy(next)) && + can_vma_merge_before(next, vm_flags, + anon_vma, file, pgoff+pglen)) { +- if (prev && addr < prev->vm_end) /* case 4 */ ++ if (prev && addr < prev->vm_end) { /* case 4 */ + vma_adjust(prev, prev->vm_start, + addr, prev->vm_pgoff, NULL); +- else /* cases 3, 8 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (prev_m) ++ vma_adjust(prev_m, prev_m->vm_start, ++ addr_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 3, 8 */ + vma_adjust(area, addr, next->vm_end, + next->vm_pgoff - pglen, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (area_m) ++ vma_adjust(area_m, addr_m, next_m->vm_end, ++ next_m->vm_pgoff - pglen, NULL); ++#endif ++ ++ } + return area; + } + +@@ -898,14 +978,11 @@ none: + void vm_stat_account(struct mm_struct *mm, unsigned long flags, + struct file *file, long pages) + { +- const unsigned long stack_flags +- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); +- + if (file) { + mm->shared_vm += pages; + if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) + mm->exec_vm += pages; +- } else if (flags & stack_flags) ++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN)) + mm->stack_vm += pages; + if (flags & (VM_RESERVED|VM_IO)) + mm->reserved_vm += pages; +@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + * (the exception is when the underlying filesystem is noexec + * mounted, in which case we dont add PROT_EXEC.) + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) + prot |= PROT_EXEC; + +@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. + */ +- addr = get_unmapped_area(file, addr, len, pgoff, flags); ++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? 
MAP_EXECUTABLE : 0)); + if (addr & ~PAGE_MASK) + return addr; + +@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++#ifndef CONFIG_PAX_MPROTECT_COMPAT ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) { ++ gr_log_rwxmmap(file); ++ ++#ifdef CONFIG_PAX_EMUPLT ++ vm_flags &= ~VM_EXEC; ++#else ++ return -EPERM; ++#endif ++ ++ } ++ ++ if (!(vm_flags & VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++#else ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) ++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ else ++ vm_flags &= ~VM_MAYWRITE; ++ } ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file) ++ vm_flags &= ~VM_PAGEEXEC; ++#endif ++ + if (flags & MAP_LOCKED) + if (!can_do_mlock()) + return -EPERM; +@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + locked += mm->locked_vm; + lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; + lock_limit >>= PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) + return -EAGAIN; + } +@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + if (error) + return error; + ++ if (!gr_acl_handle_mmap(file, prot)) ++ return -EACCES; ++ + return mmap_region(file, addr, len, flags, vm_flags, pgoff); + } + EXPORT_SYMBOL(do_mmap_pgoff); +@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff); + */ + int vma_wants_writenotify(struct vm_area_struct *vma) + { +- unsigned int vm_flags = vma->vm_flags; ++ unsigned long vm_flags = vma->vm_flags; + + /* If it was private or non-writable, the write bit is already clear */ +- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) ++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED)) + return 0; + + /* The backer wishes to know when pages are first written to? */ +@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long charged = 0; + struct inode *inode = file ? file->f_path.dentry->d_inode : NULL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + /* Clear old maps */ + error = -ENOMEM; +-munmap_back: + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + if (vma && vma->vm_start < addr + len) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); ++ BUG_ON(vma && vma->vm_start < addr + len); + } + + /* Check against address space limit. 
*/ +@@ -1173,6 +1294,16 @@ munmap_back: + goto unacct_error; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) { ++ error = -ENOMEM; ++ goto free_vma; ++ } ++ } ++#endif ++ + vma->vm_mm = mm; + vma->vm_start = addr; + vma->vm_end = addr + len; +@@ -1180,8 +1311,9 @@ munmap_back: + vma->vm_page_prot = vm_get_page_prot(vm_flags); + vma->vm_pgoff = pgoff; + ++ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */ ++ + if (file) { +- error = -EINVAL; + if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) + goto free_vma; + if (vm_flags & VM_DENYWRITE) { +@@ -1195,6 +1327,19 @@ munmap_back: + error = file->f_op->mmap(file, vma); + if (error) + goto unmap_and_free_vma; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m && (vm_flags & VM_EXECUTABLE)) ++ added_exe_file_vma(mm); ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) { ++ vma->vm_flags |= VM_PAGEEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ } ++#endif ++ + if (vm_flags & VM_EXECUTABLE) + added_exe_file_vma(mm); + +@@ -1207,6 +1352,8 @@ munmap_back: + pgoff = vma->vm_pgoff; + vm_flags = vma->vm_flags; + } else if (vm_flags & VM_SHARED) { ++ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP))) ++ goto free_vma; + error = shmem_zero_setup(vma); + if (error) + goto free_vma; +@@ -1218,6 +1365,11 @@ munmap_back: + vma_link(mm, vma, prev, rb_link, rb_parent); + file = vma->vm_file; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ pax_mirror_vma(vma_m, vma); ++#endif ++ + /* Once vma denies write, undo our temporary denial count */ + if (correct_wcount) + atomic_inc(&inode->i_writecount); +@@ -1226,6 +1378,7 @@ out: + + mm->total_vm += len >> PAGE_SHIFT; + vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); ++ track_exec_limit(mm, addr, addr + len, vm_flags); + if (vm_flags & VM_LOCKED) { + /* + * makes pages present; downgrades, drops, reacquires mmap_sem +@@ -1248,6 +1401,12 @@ unmap_and_free_vma: + unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); + charged = 0; + free_vma: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ kmem_cache_free(vm_area_cachep, vma_m); ++#endif ++ + kmem_cache_free(vm_area_cachep, vma); + unacct_error: + if (charged) +@@ -1255,6 +1414,44 @@ unacct_error: + return error; + } + ++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len) ++{ ++ if (!vma) { ++#ifdef CONFIG_STACK_GROWSUP ++ if (addr > sysctl_heap_stack_gap) ++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); ++ else ++ vma = find_vma(current->mm, 0); ++ if (vma && (vma->vm_flags & VM_GROWSUP)) ++ return false; ++#endif ++ return true; ++ } ++ ++ if (addr + len > vma->vm_start) ++ return false; ++ ++ if (vma->vm_flags & VM_GROWSDOWN) ++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; ++#ifdef CONFIG_STACK_GROWSUP ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) ++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap; ++#endif ++ ++ return true; ++} ++ ++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len) ++{ ++ if (vma->vm_start < len) ++ return -ENOMEM; ++ if (!(vma->vm_flags & VM_GROWSDOWN)) ++ return vma->vm_start - len; ++ if (sysctl_heap_stack_gap <= vma->vm_start - len) ++ return vma->vm_start - len - sysctl_heap_stack_gap; ++ return -ENOMEM; ++} ++ + /* Get an address range which is 
currently unmapped. + * For shmat() with addr=0. + * +@@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + if (len > mm->cached_hole_size) { +- start_addr = addr = mm->free_area_cache; ++ start_addr = addr = mm->free_area_cache; + } else { +- start_addr = addr = TASK_UNMAPPED_BASE; +- mm->cached_hole_size = 0; ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; + } + + full_search: +@@ -1303,34 +1505,40 @@ full_search: + * Start a new search - just in case we missed + * some holes. + */ +- if (start_addr != TASK_UNMAPPED_BASE) { +- addr = TASK_UNMAPPED_BASE; +- start_addr = addr; ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- /* +- * Remember the place where we stopped the search: +- */ +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; + } + #endif + + void arch_unmap_area(struct mm_struct *mm, unsigned long addr) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) ++ return; ++#endif ++ + /* + * Is this a new hole at the lowest possible address? 
+ */ +- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { ++ if (addr >= mm->mmap_base && addr < mm->free_area_cache) { + mm->free_area_cache = addr; + mm->cached_hole_size = ~0UL; + } +@@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + { + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; +- unsigned long addr = addr0; ++ unsigned long base = mm->mmap_base, addr = addr0; + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) +@@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + + /* check if free_area_cache is useful for us */ +@@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + +@@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = vma->vm_start-len; +- } while (len < vma->vm_start); ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); + + bottomup: + /* +@@ -1414,13 +1627,21 @@ bottomup: + * can happen with large stack limits and large mmap() + * allocations. + */ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; +- mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ +- mm->free_area_cache = mm->mmap_base; ++ mm->mmap_base = base; ++ mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; +@@ -1429,6 +1650,12 @@ bottomup: + + void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) ++ return; ++#endif ++ + /* + * Is this a new hole at the highest possible address? 
+ */ +@@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) + mm->free_area_cache = addr; + + /* dont allow allocations above current base */ +- if (mm->free_area_cache > mm->mmap_base) ++ if (mm->free_area_cache > mm->mmap_base) { + mm->free_area_cache = mm->mmap_base; ++ mm->cached_hole_size = ~0UL; ++ } + } + + unsigned long +@@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) + + EXPORT_SYMBOL(find_vma); + +-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ ++/* ++ * Same as find_vma, but also return a pointer to the previous VMA in *pprev. ++ */ + struct vm_area_struct * + find_vma_prev(struct mm_struct *mm, unsigned long addr, + struct vm_area_struct **pprev) + { +- struct vm_area_struct *vma = NULL, *prev = NULL; +- struct rb_node *rb_node; +- if (!mm) +- goto out; +- +- /* Guard against addr being lower than the first VMA */ +- vma = mm->mmap; +- +- /* Go through the RB tree quickly. */ +- rb_node = mm->mm_rb.rb_node; +- +- while (rb_node) { +- struct vm_area_struct *vma_tmp; +- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); +- +- if (addr < vma_tmp->vm_end) { +- rb_node = rb_node->rb_left; +- } else { +- prev = vma_tmp; +- if (!prev->vm_next || (addr < prev->vm_next->vm_end)) +- break; ++ struct vm_area_struct *vma; ++ ++ vma = find_vma(mm, addr); ++ if (vma) { ++ *pprev = vma->vm_prev; ++ } else { ++ struct rb_node *rb_node = mm->mm_rb.rb_node; ++ *pprev = NULL; ++ while (rb_node) { ++ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); + rb_node = rb_node->rb_right; + } + } ++ return vma; ++} ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma) ++{ ++ struct vm_area_struct *vma_m; + +-out: +- *pprev = prev; +- return prev ? prev->vm_next : vma; ++ BUG_ON(!vma || vma->vm_start >= vma->vm_end); ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) { ++ BUG_ON(vma->vm_mirror); ++ return NULL; ++ } ++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end); ++ vma_m = vma->vm_mirror; ++ BUG_ON(!vma_m || vma_m->vm_mirror != vma); ++ BUG_ON(vma->vm_file != vma_m->vm_file); ++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start); ++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma); ++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED)); ++ return vma_m; + } ++#endif + + /* + * Verify that the stack growth is acceptable and +@@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + return -ENOMEM; + + /* Stack limit test */ ++ gr_learn_resource(current, RLIMIT_STACK, size, 1); + if (size > rlim[RLIMIT_STACK].rlim_cur) + return -ENOMEM; + +@@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + unsigned long limit; + locked = mm->locked_vm + grow; + limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > limit && !capable(CAP_IPC_LOCK)) + return -ENOMEM; + } +@@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + * PA-RISC uses this for its stack; IA64 for its Register Backing Store. + * vma is the last one with address > vma->vm_end. Have to extend vma. 
+ */ ++#ifndef CONFIG_IA64 ++static ++#endif + int expand_upwards(struct vm_area_struct *vma, unsigned long address) + { + int error; ++ bool locknext; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + ++ /* Also guard against wrapping around to address 0. */ ++ if (address < PAGE_ALIGN(address+1)) ++ address = PAGE_ALIGN(address+1); ++ else ++ return -ENOMEM; ++ + /* + * We must make sure the anon_vma is allocated + * so that the anon_vma locking is not a noop. + */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; ++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN); ++ if (locknext && anon_vma_prepare(vma->vm_next)) ++ return -ENOMEM; + anon_vma_lock(vma); ++ if (locknext) ++ anon_vma_lock(vma->vm_next); + + /* + * vma->vm_start/vm_end cannot change under us because the caller + * is required to hold the mmap_sem in read mode. We need the +- * anon_vma lock to serialize against concurrent expand_stacks. +- * Also guard against wrapping around to address 0. ++ * anon_vma locks to serialize against concurrent expand_stacks ++ * and expand_upwards. + */ +- if (address < PAGE_ALIGN(address+4)) +- address = PAGE_ALIGN(address+4); +- else { +- anon_vma_unlock(vma); +- return -ENOMEM; +- } + error = 0; + + /* Somebody else might have raced and expanded it already */ +- if (address > vma->vm_end) { ++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) { + unsigned long size, grow; + + size = address - vma->vm_start; +@@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) + vma->vm_end = address; + } + } ++ if (locknext) ++ anon_vma_unlock(vma->vm_next); + anon_vma_unlock(vma); + return error; + } +@@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma, + unsigned long address) + { + int error; ++ bool lockprev = false; ++ struct vm_area_struct *prev; + + /* + * We must make sure the anon_vma is allocated +@@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma, + if (error) + return error; + ++ prev = vma->vm_prev; ++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) ++ lockprev = prev && (prev->vm_flags & VM_GROWSUP); ++#endif ++ if (lockprev && anon_vma_prepare(prev)) ++ return -ENOMEM; ++ if (lockprev) ++ anon_vma_lock(prev); ++ + anon_vma_lock(vma); + + /* +@@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma, + */ + + /* Somebody else might have raced and expanded it already */ +- if (address < vma->vm_start) { ++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) { + unsigned long size, grow; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++#endif ++ + size = vma->vm_end - address; + grow = (vma->vm_start - address) >> PAGE_SHIFT; + +@@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma, + if (!error) { + vma->vm_start = address; + vma->vm_pgoff -= grow; ++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ vma_m->vm_start -= grow << PAGE_SHIFT; ++ vma_m->vm_pgoff -= grow; ++ } ++#endif ++ ++ + } + } + } + anon_vma_unlock(vma); ++ if (lockprev) 
++ anon_vma_unlock(prev); + return error; + } + +@@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) + do { + long nrpages = vma_pages(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) { ++ vma = remove_vma(vma); ++ continue; ++ } ++#endif ++ + mm->total_vm -= nrpages; + vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); + vma = remove_vma(vma); +@@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, + insertion_point = (prev ? &prev->vm_next : &mm->mmap); + vma->vm_prev = NULL; + do { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma->vm_mirror) { ++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma); ++ vma->vm_mirror->vm_mirror = NULL; ++ vma->vm_mirror->vm_flags &= ~VM_EXEC; ++ vma->vm_mirror = NULL; ++ } ++#endif ++ + rb_erase(&vma->vm_rb, &mm->mm_rb); + mm->map_count--; + tail_vma = vma; +@@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + struct mempolicy *pol; + struct vm_area_struct *new; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m, *new_m = NULL; ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE; ++#endif ++ + if (is_vm_hugetlb_page(vma) && (addr & + ~(huge_page_mask(hstate_vma(vma))))) + return -EINVAL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE); ++ if (mm->map_count >= sysctl_max_map_count-1) ++ return -ENOMEM; ++ } else ++#endif ++ + if (mm->map_count >= sysctl_max_map_count) + return -ENOMEM; + +@@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + if (!new) + return -ENOMEM; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ if (!new_m) { ++ kmem_cache_free(vm_area_cachep, new); ++ return -ENOMEM; ++ } ++ } ++#endif ++ + /* most fields are the same, copy all, and then fixup */ + *new = *vma; + +@@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ *new_m = *vma_m; ++ new_m->vm_mirror = new; ++ new->vm_mirror = new_m; ++ ++ if (new_below) ++ new_m->vm_end = addr_m; ++ else { ++ new_m->vm_start = addr_m; ++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT); ++ } ++ } ++#endif ++ + pol = mpol_dup(vma_policy(vma)); + if (IS_ERR(pol)) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (new_m) ++ kmem_cache_free(vm_area_cachep, new_m); ++#endif ++ + kmem_cache_free(vm_area_cachep, new); + return PTR_ERR(pol); + } +@@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + else + vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ mpol_get(pol); ++ vma_set_policy(new_m, pol); ++ ++ if (new_m->vm_file) { ++ get_file(new_m->vm_file); ++ if (vma_m->vm_flags & VM_EXECUTABLE) ++ added_exe_file_vma(mm); ++ } ++ ++ if (new_m->vm_ops && new_m->vm_ops->open) ++ new_m->vm_ops->open(new_m); ++ ++ if (new_below) ++ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff + ++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m); ++ else ++ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m); ++ } ++#endif ++ + return 0; + } + +@@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct 
vm_area_struct * vma, + * work. This now handles partial unmappings. + * Jeremy Fitzhardinge <jeremy@goop.org> + */ ++#ifdef CONFIG_PAX_SEGMEXEC + int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) + { ++ int ret = __do_munmap(mm, start, len); ++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC)) ++ return ret; ++ ++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len); ++} ++ ++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#else ++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#endif ++{ + unsigned long end; + struct vm_area_struct *vma, *prev, *last; + ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) + return -EINVAL; + +@@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) + /* Fix up all other VM information */ + remove_vma_list(mm, vma); + ++ track_exec_limit(mm, start, end, 0UL); ++ + return 0; + } + +@@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) + + profile_munmap(addr); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)) ++ return -EINVAL; ++#endif ++ + down_write(&mm->mmap_sem); + ret = do_munmap(mm, addr, len); + up_write(&mm->mmap_sem); + return ret; + } + +-static inline void verify_mm_writelocked(struct mm_struct *mm) +-{ +-#ifdef CONFIG_DEBUG_VM +- if (unlikely(down_read_trylock(&mm->mmap_sem))) { +- WARN_ON(1); +- up_read(&mm->mmap_sem); +- } +-#endif +-} +- + /* + * this is really a simplified "do_mmap". it only handles + * anonymous maps. eventually we may be able to do some +@@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + struct rb_node ** rb_link, * rb_parent; + pgoff_t pgoff = addr >> PAGE_SHIFT; + int error; ++ unsigned long charged; + + len = PAGE_ALIGN(len); + if (!len) +@@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (error & ~PAGE_MASK) + return error; + ++ charged = len >> PAGE_SHIFT; ++ + /* + * mlock MCL_FUTURE? + */ + if (mm->def_flags & VM_LOCKED) { + unsigned long locked, lock_limit; +- locked = len >> PAGE_SHIFT; ++ locked = charged; + locked += mm->locked_vm; + lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; + lock_limit >>= PAGE_SHIFT; +@@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + /* + * Clear old maps. this also does some error checking for us + */ +- munmap_back: + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + if (vma && vma->vm_start < addr + len) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); ++ BUG_ON(vma && vma->vm_start < addr + len); + } + + /* Check against address space limits *after* clearing old maps... 
*/ +- if (!may_expand_vm(mm, len >> PAGE_SHIFT)) ++ if (!may_expand_vm(mm, charged)) + return -ENOMEM; + + if (mm->map_count > sysctl_max_map_count) + return -ENOMEM; + +- if (security_vm_enough_memory(len >> PAGE_SHIFT)) ++ if (security_vm_enough_memory(charged)) + return -ENOMEM; + + /* Can we just expand an old private anonymous mapping? */ +@@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + */ + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma) { +- vm_unacct_memory(len >> PAGE_SHIFT); ++ vm_unacct_memory(charged); + return -ENOMEM; + } + +@@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + vma->vm_page_prot = vm_get_page_prot(flags); + vma_link(mm, vma, prev, rb_link, rb_parent); + out: +- mm->total_vm += len >> PAGE_SHIFT; ++ mm->total_vm += charged; + if (flags & VM_LOCKED) { + if (!mlock_vma_pages_range(vma, addr, addr + len)) +- mm->locked_vm += (len >> PAGE_SHIFT); ++ mm->locked_vm += charged; + } ++ track_exec_limit(mm, addr, addr + len, flags); + return addr; + } + +@@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm) + * Walk the list again, actually closing and freeing it, + * with preemption enabled, without holding any MM locks. + */ +- while (vma) ++ while (vma) { ++ vma->vm_mirror = NULL; + vma = remove_vma(vma); ++ } + + BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + } +@@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) + struct vm_area_struct * __vma, * prev; + struct rb_node ** rb_link, * rb_parent; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ + /* + * The vm_pgoff of a purely anonymous vma should be irrelevant + * until its first write fault, when page's anon_vma and index +@@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) + if ((vma->vm_flags & VM_ACCOUNT) && + security_vm_enough_memory_mm(mm, vma_pages(vma))) + return -ENOMEM; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) ++ return -ENOMEM; ++ } ++#endif ++ + vma_link(mm, vma, prev, rb_link, rb_parent); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ pax_mirror_vma(vma_m, vma); ++#endif ++ + return 0; + } + +@@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, + struct rb_node **rb_link, *rb_parent; + struct mempolicy *pol; + ++ BUG_ON(vma->vm_mirror); ++ + /* + * If anonymous vma has not yet been faulted, update new pgoff + * to match new location, to increase its chance of merging. 
+@@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ return new_vma;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
++{
++ struct vm_area_struct *prev_m;
++ struct rb_node **rb_link_m, *rb_parent_m;
++ struct mempolicy *pol_m;
++
++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
++ *vma_m = *vma;
++ pol_m = vma_policy(vma_m);
++ mpol_get(pol_m);
++ vma_set_policy(vma_m, pol_m);
++ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
++ if (vma_m->vm_file)
++ get_file(vma_m->vm_file);
++ if (vma_m->vm_ops && vma_m->vm_ops->open)
++ vma_m->vm_ops->open(vma_m);
++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
++ vma_m->vm_mirror = vma;
++ vma->vm_mirror = vma_m;
++}
++#endif
++
+ /*
+ * Return true if the calling process may expand its vm space by the passed
+ * number of pages
+@@ -2238,6 +2692,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+
+ lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ cur -= mm->brk_gap;
++#endif
++
++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
+ if (cur + npages > lim)
+ return 0;
+ return 1;
+@@ -2307,6 +2767,22 @@ int install_special_mapping(struct mm_struct *mm,
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
++ return -EPERM;
++ if (!(vm_flags & VM_EXEC))
++ vm_flags &= ~VM_MAYEXEC;
++#else
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++ else
++ vm_flags &= ~VM_MAYWRITE;
++ }
++#endif
++
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 1737c7e..c7faeb4 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -24,10 +24,16 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/perf_event.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
+ flush_tlb_range(vma, start, end);
+ }
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing, except during stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++ unsigned long oldlimit, newlimit = 0UL;
++
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
++ return;
++
++ spin_lock(&mm->page_table_lock);
++ oldlimit = mm->context.user_cs_limit;
++ if ((prot & VM_EXEC) && oldlimit < end)
++ /* USER_CS limit moved up */
++ newlimit = end;
++ else if (!(prot & VM_EXEC) && 
start < oldlimit && oldlimit <= end) ++ /* USER_CS limit moved down */ ++ newlimit = start; ++ ++ if (newlimit) { ++ mm->context.user_cs_limit = newlimit; ++ ++#ifdef CONFIG_SMP ++ wmb(); ++ cpus_clear(mm->context.cpu_user_cs_mask); ++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask); ++#endif ++ ++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id()); ++ } ++ spin_unlock(&mm->page_table_lock); ++ if (newlimit == end) { ++ struct vm_area_struct *vma = find_vma(mm, oldlimit); ++ ++ for (; vma && vma->vm_start < end; vma = vma->vm_next) ++ if (is_vm_hugetlb_page(vma)) ++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot); ++ else ++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma)); ++ } ++} ++#endif ++ + int + mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + unsigned long start, unsigned long end, unsigned long newflags) +@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + int error; + int dirty_accountable = 0; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++ unsigned long start_m, end_m; ++ ++ start_m = start + SEGMEXEC_TASK_SIZE; ++ end_m = end + SEGMEXEC_TASK_SIZE; ++#endif ++ + if (newflags == oldflags) { + *pprev = vma; + return 0; + } + ++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) { ++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next; ++ ++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end) ++ return -ENOMEM; ++ ++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end) ++ return -ENOMEM; ++ } ++ + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we +@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + } + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) { ++ if (start != vma->vm_start) { ++ error = split_vma(mm, vma, start, 1); ++ if (error) ++ goto fail; ++ BUG_ON(!*pprev || (*pprev)->vm_next == vma); ++ *pprev = (*pprev)->vm_next; ++ } ++ ++ if (end != vma->vm_end) { ++ error = split_vma(mm, vma, end, 0); ++ if (error) ++ goto fail; ++ } ++ ++ if (pax_find_mirror_vma(vma)) { ++ error = __do_munmap(mm, start_m, end_m - start_m); ++ if (error) ++ goto fail; ++ } else { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) { ++ error = -ENOMEM; ++ goto fail; ++ } ++ vma->vm_flags = newflags; ++ pax_mirror_vma(vma_m, vma); ++ } ++ } ++#endif ++ + /* + * First try to merge with previous and/or next vma. + */ +@@ -195,9 +293,21 @@ success: + * vm_flags and vm_page_prot are protected by the mmap_sem + * held in write mode. 
+ */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ)) ++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ; ++#endif ++ + vma->vm_flags = newflags; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->binfmt && mm->binfmt->handle_mprotect) ++ mm->binfmt->handle_mprotect(vma, newflags); ++#endif ++ + vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, +- vm_get_page_prot(newflags)); ++ vm_get_page_prot(vma->vm_flags)); + + if (vma_wants_writenotify(vma)) { + vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); +@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + end = start + len; + if (end <= start) + return -ENOMEM; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + if (!arch_validate_prot(prot)) + return -EINVAL; + +@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + /* + * Does the application expect PROT_READ to imply PROT_EXEC: + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + prot |= PROT_EXEC; + + vm_flags = calc_vm_prot_bits(prot); +@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + if (start > vma->vm_start) + prev = vma; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect) ++ current->mm->binfmt->handle_mprotect(vma, vm_flags); ++#endif ++ + for (nstart = start ; ; ) { + unsigned long newflags; + +@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + + /* newflags >> 4 shift VM_MAY% in place of VM_% */ + if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { ++ if (prot & (PROT_WRITE | PROT_EXEC)) ++ gr_log_rwxmprotect(vma->vm_file); ++ ++ error = -EACCES; ++ goto out; ++ } ++ ++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) { + error = -EACCES; + goto out; + } +@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); + if (error) + goto out; ++ ++ track_exec_limit(current->mm, nstart, tmp, vm_flags); ++ + nstart = tmp; + + if (nstart < prev->vm_end) +diff --git a/mm/mremap.c b/mm/mremap.c +index 3e98d79..1706cec 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, + continue; + pte = ptep_clear_flush(vma, old_addr, old_pte); + pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC) ++ pte = pte_exprotect(pte); ++#endif ++ + set_pte_at(mm, new_addr, new_pte, pte); + } + +@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, + if (is_vm_hugetlb_page(vma)) + goto Einval; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ goto Einval; ++#endif ++ + /* We can't remap across vm area boundaries */ + if (old_len > vma->vm_end - addr) + goto Efault; +@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr, + unsigned long ret = -EINVAL; + unsigned long charged = 0; + unsigned long map_flags; ++ unsigned long pax_task_size = TASK_SIZE; + + if (new_addr & ~PAGE_MASK) + goto out; + +- if (new_len > 
TASK_SIZE || new_addr > TASK_SIZE - new_len) ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len) + goto out; + + /* Check if the location we're moving into overlaps the + * old location at all, and fail if it does. + */ +- if ((new_addr <= addr) && (new_addr+new_len) > addr) +- goto out; +- +- if ((addr <= new_addr) && (addr+old_len) > new_addr) ++ if (addr + old_len > new_addr && new_addr + new_len > addr) + goto out; + + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); +@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr, + struct vm_area_struct *vma; + unsigned long ret = -EINVAL; + unsigned long charged = 0; ++ unsigned long pax_task_size = TASK_SIZE; + + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) + goto out; +@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr, + if (!new_len) + goto out; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (new_len > pax_task_size || addr > pax_task_size-new_len || ++ old_len > pax_task_size || addr > pax_task_size-old_len) ++ goto out; ++ + if (flags & MREMAP_FIXED) { + if (flags & MREMAP_MAYMOVE) + ret = mremap_to(addr, old_len, new_addr, new_len); +@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr, + addr + new_len); + } + ret = addr; ++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags); + goto out; + } + } +@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr, + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); + if (ret) + goto out; ++ ++ map_flags = vma->vm_flags; + ret = move_vma(vma, addr, old_len, new_len, new_addr); ++ if (!(ret & ~PAGE_MASK)) { ++ track_exec_limit(current->mm, addr, addr + old_len, 0UL); ++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags); ++ } + } + out: + if (ret & ~PAGE_MASK) +diff --git a/mm/nommu.c b/mm/nommu.c +index 406e8d4..53970d3 100644 +--- a/mm/nommu.c ++++ b/mm/nommu.c +@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ + int sysctl_overcommit_ratio = 50; /* default is 50% */ + int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; + int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; +-int heap_stack_gap = 0; + + atomic_long_t mmap_pages_allocated; + +@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) + EXPORT_SYMBOL(find_vma); + + /* +- * find a VMA +- * - we don't extend stack VMAs under NOMMU conditions +- */ +-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) +-{ +- return find_vma(mm, addr); +-} +- +-/* + * expand a stack to a given address + * - not supported under NOMMU conditions + */ +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 3ecab7e..594a471 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -289,7 +289,7 @@ out: + * This usage means that zero-order pages may not be compound. 
+ */ + +-static void free_compound_page(struct page *page) ++void free_compound_page(struct page *page) + { + __free_pages_ok(page, compound_order(page)); + } +@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order) + int bad = 0; + int wasMlocked = __TestClearPageMlocked(page); + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ unsigned long index = 1UL << order; ++#endif ++ + kmemcheck_free_shadow(page, order); + + for (i = 0 ; i < (1 << order) ; ++i) +@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order) + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE << order); + } ++ ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ for (; index; --index) ++ sanitize_highpage(page + index - 1); ++#endif ++ + arch_free_page(page, order); + kernel_map_pages(page, 1 << order, 0); + +@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) + arch_alloc_page(page, order); + kernel_map_pages(page, 1 << order, 1); + ++#ifndef CONFIG_PAX_MEMORY_SANITIZE + if (gfp_flags & __GFP_ZERO) + prep_zero_page(page, order, gfp_flags); ++#endif + + if (order && (gfp_flags & __GFP_COMP)) + prep_compound_page(page, order); +@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold) + debug_check_no_locks_freed(page_address(page), PAGE_SIZE); + debug_check_no_obj_freed(page_address(page), PAGE_SIZE); + } ++ ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ sanitize_highpage(page); ++#endif ++ + arch_free_page(page, 0); + kernel_map_pages(page, 1, 0); + +@@ -2179,6 +2196,8 @@ void show_free_areas(void) + int cpu; + struct zone *zone; + ++ pax_track_stack(); ++ + for_each_populated_zone(zone) { + show_node(zone); + printk("%s per-cpu:\n", zone->name); +@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat, + zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); + } + #else +-static void inline setup_usemap(struct pglist_data *pgdat, ++static inline void setup_usemap(struct pglist_data *pgdat, + struct zone *zone, unsigned long zonesize) {} + #endif /* CONFIG_SPARSEMEM */ + +diff --git a/mm/percpu.c b/mm/percpu.c +index c90614a..5f7b7b8 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly; + static unsigned int pcpu_high_unit_cpu __read_mostly; + + /* the address of the first chunk which starts with the kernel static area */ +-void *pcpu_base_addr __read_mostly; ++void *pcpu_base_addr __read_only; + EXPORT_SYMBOL_GPL(pcpu_base_addr); + + static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ +diff --git a/mm/rmap.c b/mm/rmap.c +index dd43373..d848cd7 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma) + /* page_table_lock to protect against threads */ + spin_lock(&mm->page_table_lock); + if (likely(!vma->anon_vma)) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma); ++ ++ if (vma_m) { ++ BUG_ON(vma_m->anon_vma); ++ vma_m->anon_vma = anon_vma; ++ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head); ++ } ++#endif ++ + vma->anon_vma = anon_vma; + list_add_tail(&vma->anon_vma_node, &anon_vma->head); + allocated = NULL; +diff --git a/mm/shmem.c b/mm/shmem.c +index 3e0005b..1d659a8 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -31,7 +31,7 @@ + #include <linux/swap.h> + #include <linux/ima.h> + +-static struct vfsmount *shm_mnt; ++struct vfsmount *shm_mnt; + + #ifdef CONFIG_SHMEM + /* +@@ -1061,6 +1061,8 @@ static int 
shmem_writepage(struct page *page, struct writeback_control *wbc) + goto unlock; + } + entry = shmem_swp_entry(info, index, NULL); ++ if (!entry) ++ goto unlock; + if (entry->val) { + /* + * The more uptodate page coming down from a stacked +@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, + struct vm_area_struct pvma; + struct page *page; + ++ pax_track_stack(); ++ + spol = mpol_cond_copy(&mpol, + mpol_shared_policy_lookup(&info->policy, idx)); + +@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s + + info = SHMEM_I(inode); + inode->i_size = len-1; +- if (len <= (char *)inode - (char *)info) { ++ if (len <= (char *)inode - (char *)info && len <= 64) { + /* do it inline */ + memcpy(info, symname, len); + inode->i_op = &shmem_symlink_inline_operations; +@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) + int err = -ENOMEM; + + /* Round up to L1_CACHE_BYTES to resist false sharing */ +- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), +- L1_CACHE_BYTES), GFP_KERNEL); ++ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL); + if (!sbinfo) + return -ENOMEM; + +diff --git a/mm/slab.c b/mm/slab.c +index c8d466a..909e01e 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -174,7 +174,7 @@ + + /* Legal flag mask for kmem_cache_create(). */ + #if DEBUG +-# define CREATE_MASK (SLAB_RED_ZONE | \ ++# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \ + SLAB_POISON | SLAB_HWCACHE_ALIGN | \ + SLAB_CACHE_DMA | \ + SLAB_STORE_USER | \ +@@ -182,7 +182,7 @@ + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ + SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK) + #else +-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ ++# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \ + SLAB_CACHE_DMA | \ + SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ +@@ -308,7 +308,7 @@ struct kmem_list3 { + * Need this for bootstrapping a per node allocator. 
+ */ + #define NUM_INIT_LISTS (3 * MAX_NUMNODES) +-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; ++struct kmem_list3 initkmem_list3[NUM_INIT_LISTS]; + #define CACHE_CACHE 0 + #define SIZE_AC MAX_NUMNODES + #define SIZE_L3 (2 * MAX_NUMNODES) +@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent) + if ((x)->max_freeable < i) \ + (x)->max_freeable = i; \ + } while (0) +-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) +-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) +-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) +-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) ++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit) ++#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss) ++#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit) ++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss) + #else + #define STATS_INC_ACTIVE(x) do { } while (0) + #define STATS_DEC_ACTIVE(x) do { } while (0) +@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, + * reciprocal_divide(offset, cache->reciprocal_buffer_size) + */ + static inline unsigned int obj_to_index(const struct kmem_cache *cache, +- const struct slab *slab, void *obj) ++ const struct slab *slab, const void *obj) + { + u32 offset = (obj - slab->s_mem); + return reciprocal_divide(offset, cache->reciprocal_buffer_size); +@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void) + sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, + sizes[INDEX_AC].cs_size, + ARCH_KMALLOC_MINALIGN, +- ARCH_KMALLOC_FLAGS|SLAB_PANIC, ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, + NULL); + + if (INDEX_AC != INDEX_L3) { +@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void) + kmem_cache_create(names[INDEX_L3].name, + sizes[INDEX_L3].cs_size, + ARCH_KMALLOC_MINALIGN, +- ARCH_KMALLOC_FLAGS|SLAB_PANIC, ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, + NULL); + } + +@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void) + sizes->cs_cachep = kmem_cache_create(names->name, + sizes->cs_size, + ARCH_KMALLOC_MINALIGN, +- ARCH_KMALLOC_FLAGS|SLAB_PANIC, ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, + NULL); + } + #ifdef CONFIG_ZONE_DMA +@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p) + } + /* cpu stats */ + { +- unsigned long allochit = atomic_read(&cachep->allochit); +- unsigned long allocmiss = atomic_read(&cachep->allocmiss); +- unsigned long freehit = atomic_read(&cachep->freehit); +- unsigned long freemiss = atomic_read(&cachep->freemiss); ++ unsigned long allochit = atomic_read_unchecked(&cachep->allochit); ++ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss); ++ unsigned long freehit = atomic_read_unchecked(&cachep->freehit); ++ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss); + + seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", + allochit, allocmiss, freehit, freemiss); +@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = { + + static int __init slab_proc_init(void) + { +- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations); ++ mode_t gr_mode = S_IRUGO; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ ++ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations); + #ifdef CONFIG_DEBUG_SLAB_LEAK +- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); ++ proc_create("slab_allocators", gr_mode, NULL, 
&proc_slabstats_operations); + #endif + return 0; + } + module_init(slab_proc_init); + #endif + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct page *page; ++ struct kmem_cache *cachep = NULL; ++ struct slab *slabp; ++ unsigned int objnr; ++ unsigned long offset; ++ const char *type; ++ ++ if (!n) ++ return; ++ ++ type = "<null>"; ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ page = virt_to_head_page(ptr); ++ ++ type = "<process stack>"; ++ if (!PageSlab(page)) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ cachep = page_get_cache(page); ++ type = cachep->name; ++ if (!(cachep->flags & SLAB_USERCOPY)) ++ goto report; ++ ++ slabp = page_get_slab(page); ++ objnr = obj_to_index(cachep, slabp, ptr); ++ BUG_ON(objnr >= cachep->num); ++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep); ++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset) ++ return; ++ ++report: ++ pax_report_usercopy(ptr, n, to, type); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + /** + * ksize - get the actual amount of memory allocated for a given object + * @objp: Pointer to the object +diff --git a/mm/slob.c b/mm/slob.c +index 837ebd6..0bd23bc 100644 +--- a/mm/slob.c ++++ b/mm/slob.c +@@ -29,7 +29,7 @@ + * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls + * alloc_pages() directly, allocating compound pages so the page order + * does not have to be separately tracked, and also stores the exact +- * allocation size in page->private so that it can be used to accurately ++ * allocation size in slob_page->size so that it can be used to accurately + * provide ksize(). These objects are detected in kfree() because slob_page() + * is false for them. + * +@@ -58,6 +58,7 @@ + */ + + #include <linux/kernel.h> ++#include <linux/sched.h> + #include <linux/slab.h> + #include <linux/mm.h> + #include <linux/swap.h> /* struct reclaim_state */ +@@ -100,7 +101,8 @@ struct slob_page { + unsigned long flags; /* mandatory */ + atomic_t _count; /* mandatory */ + slobidx_t units; /* free units left in page */ +- unsigned long pad[2]; ++ unsigned long pad[1]; ++ unsigned long size; /* size when >=PAGE_SIZE */ + slob_t *free; /* first free slob_t in page */ + struct list_head list; /* linked list of free pages */ + }; +@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large); + */ + static inline int is_slob_page(struct slob_page *sp) + { +- return PageSlab((struct page *)sp); ++ return PageSlab((struct page *)sp) && !sp->size; + } + + static inline void set_slob_page(struct slob_page *sp) +@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp) + + static inline struct slob_page *slob_page(const void *addr) + { +- return (struct slob_page *)virt_to_page(addr); ++ return (struct slob_page *)virt_to_head_page(addr); + } + + /* +@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next) + /* + * Return the size of a slob block. + */ +-static slobidx_t slob_units(slob_t *s) ++static slobidx_t slob_units(const slob_t *s) + { + if (s->units > 0) + return s->units; +@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s) + /* + * Return the next free slob block pointer after this one. 
+ */ +-static slob_t *slob_next(slob_t *s) ++static slob_t *slob_next(const slob_t *s) + { + slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); + slobidx_t next; +@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s) + /* + * Returns true if s is the last free block in its page. + */ +-static int slob_last(slob_t *s) ++static int slob_last(const slob_t *s) + { + return !((unsigned long)slob_next(s) & ~PAGE_MASK); + } +@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) + if (!page) + return NULL; + ++ set_slob_page(page); + return page_address(page); + } + +@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) + if (!b) + return NULL; + sp = slob_page(b); +- set_slob_page(sp); + + spin_lock_irqsave(&slob_lock, flags); + sp->units = SLOB_UNITS(PAGE_SIZE); + sp->free = b; ++ sp->size = 0; + INIT_LIST_HEAD(&sp->list); + set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); + set_slob_page_free(sp, slob_list); +@@ -475,10 +478,9 @@ out: + #define ARCH_SLAB_MINALIGN __alignof__(unsigned long) + #endif + +-void *__kmalloc_node(size_t size, gfp_t gfp, int node) ++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align) + { +- unsigned int *m; +- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ++ slob_t *m; + void *ret; + + lockdep_trace_alloc(gfp); +@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) + + if (!m) + return NULL; +- *m = size; ++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT); ++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT); ++ m[0].units = size; ++ m[1].units = align; + ret = (void *)m + align; + + trace_kmalloc_node(_RET_IP_, ret, +@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) + + ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node); + if (ret) { +- struct page *page; +- page = virt_to_page(ret); +- page->private = size; ++ struct slob_page *sp; ++ sp = slob_page(ret); ++ sp->size = size; + } + + trace_kmalloc_node(_RET_IP_, ret, + size, PAGE_SIZE << order, gfp, node); + } + +- kmemleak_alloc(ret, size, 1, gfp); ++ return ret; ++} ++ ++void *__kmalloc_node(size_t size, gfp_t gfp, int node) ++{ ++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ++ void *ret = __kmalloc_node_align(size, gfp, node, align); ++ ++ if (!ZERO_OR_NULL_PTR(ret)) ++ kmemleak_alloc(ret, size, 1, gfp); + return ret; + } + EXPORT_SYMBOL(__kmalloc_node); +@@ -528,13 +542,92 @@ void kfree(const void *block) + sp = slob_page(block); + if (is_slob_page(sp)) { + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- unsigned int *m = (unsigned int *)(block - align); +- slob_free(m, *m + align); +- } else ++ slob_t *m = (slob_t *)(block - align); ++ slob_free(m, m[0].units + align); ++ } else { ++ clear_slob_page(sp); ++ free_slob_page(sp); ++ sp->size = 0; + put_page(&sp->page); ++ } + } + EXPORT_SYMBOL(kfree); + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct slob_page *sp; ++ const slob_t *free; ++ const void *base; ++ unsigned long flags; ++ const char *type; ++ ++ if (!n) ++ return; ++ ++ type = "<null>"; ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ type = "<process stack>"; ++ sp = slob_page(ptr); ++ if (!PageSlab((struct page *)sp)) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ type = "<slob>"; ++ if (sp->size) { ++ base = page_address(&sp->page); ++ if 
(base <= ptr && n <= sp->size - (ptr - base)) ++ return; ++ goto report; ++ } ++ ++ /* some tricky double walking to find the chunk */ ++ spin_lock_irqsave(&slob_lock, flags); ++ base = (void *)((unsigned long)ptr & PAGE_MASK); ++ free = sp->free; ++ ++ while (!slob_last(free) && (void *)free <= ptr) { ++ base = free + slob_units(free); ++ free = slob_next(free); ++ } ++ ++ while (base < (void *)free) { ++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units; ++ int size = SLOB_UNIT * SLOB_UNITS(m + align); ++ int offset; ++ ++ if (ptr < base + align) ++ break; ++ ++ offset = ptr - base - align; ++ if (offset >= m) { ++ base += size; ++ continue; ++ } ++ ++ if (n > m - offset) ++ break; ++ ++ spin_unlock_irqrestore(&slob_lock, flags); ++ return; ++ } ++ ++ spin_unlock_irqrestore(&slob_lock, flags); ++report: ++ pax_report_usercopy(ptr, n, to, type); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + /* can't use ksize for kmem_cache_alloc memory, only kmalloc */ + size_t ksize(const void *block) + { +@@ -547,10 +640,10 @@ size_t ksize(const void *block) + sp = slob_page(block); + if (is_slob_page(sp)) { + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- unsigned int *m = (unsigned int *)(block - align); +- return SLOB_UNITS(*m) * SLOB_UNIT; ++ slob_t *m = (slob_t *)(block - align); ++ return SLOB_UNITS(m[0].units) * SLOB_UNIT; + } else +- return sp->page.private; ++ return sp->size; + } + EXPORT_SYMBOL(ksize); + +@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, + { + struct kmem_cache *c; + ++#ifdef CONFIG_PAX_USERCOPY ++ c = __kmalloc_node_align(sizeof(struct kmem_cache), ++ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN); ++#else + c = slob_alloc(sizeof(struct kmem_cache), + GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); ++#endif + + if (c) { + c->name = name; +@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) + { + void *b; + ++#ifdef CONFIG_PAX_USERCOPY ++ b = __kmalloc_node_align(c->size, flags, node, c->align); ++#else + if (c->size < PAGE_SIZE) { + b = slob_alloc(c->size, flags, c->align, node); + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + SLOB_UNITS(c->size) * SLOB_UNIT, + flags, node); + } else { ++ struct slob_page *sp; ++ + b = slob_new_pages(flags, get_order(c->size), node); ++ sp = slob_page(b); ++ sp->size = c->size; + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + PAGE_SIZE << get_order(c->size), + flags, node); + } ++#endif + + if (c->ctor) + c->ctor(b); +@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); + + static void __kmem_cache_free(void *b, int size) + { +- if (size < PAGE_SIZE) ++ struct slob_page *sp = slob_page(b); ++ ++ if (is_slob_page(sp)) + slob_free(b, size); +- else ++ else { ++ clear_slob_page(sp); ++ free_slob_page(sp); ++ sp->size = 0; + slob_free_pages(b, get_order(size)); ++ } + } + + static void kmem_rcu_free(struct rcu_head *head) +@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head) + + void kmem_cache_free(struct kmem_cache *c, void *b) + { ++ int size = c->size; ++ ++#ifdef CONFIG_PAX_USERCOPY ++ if (size + c->align < PAGE_SIZE) { ++ size += c->align; ++ b -= c->align; ++ } ++#endif ++ + kmemleak_free_recursive(b, c->flags); + if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { + struct slob_rcu *slob_rcu; +- slob_rcu = b + (c->size - sizeof(struct slob_rcu)); ++ slob_rcu = b + (size - sizeof(struct slob_rcu)); + INIT_RCU_HEAD(&slob_rcu->head); +- slob_rcu->size = c->size; ++ slob_rcu->size = size; + 
call_rcu(&slob_rcu->head, kmem_rcu_free); + } else { +- __kmem_cache_free(b, c->size); ++ __kmem_cache_free(b, size); + } + ++#ifdef CONFIG_PAX_USERCOPY ++ trace_kfree(_RET_IP_, b); ++#else + trace_kmem_cache_free(_RET_IP_, b); ++#endif ++ + } + EXPORT_SYMBOL(kmem_cache_free); + +diff --git a/mm/slub.c b/mm/slub.c +index 4996fc7..87e01d0 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -201,7 +201,7 @@ struct track { + + enum track_item { TRACK_ALLOC, TRACK_FREE }; + +-#ifdef CONFIG_SLUB_DEBUG ++#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_add(struct kmem_cache *); + static int sysfs_slab_alias(struct kmem_cache *, const char *); + static void sysfs_slab_remove(struct kmem_cache *); +@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t) + if (!t->addr) + return; + +- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", ++ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n", + s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); + } + +@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x) + + page = virt_to_head_page(x); + ++ BUG_ON(!PageSlab(page)); ++ + slab_free(s, page, x, _RET_IP_); + + trace_kmem_cache_free(_RET_IP_, x); +@@ -1937,7 +1939,7 @@ static int slub_min_objects; + * Merge control. If this is set then no merging of slab caches will occur. + * (Could be removed. This was introduced to pacify the merge skeptics.) + */ +-static int slub_nomerge; ++static int slub_nomerge = 1; + + /* + * Calculate the order of allocation given an slab object size. +@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, + * list to avoid pounding the page allocator excessively. + */ + set_min_partial(s, ilog2(s->size)); +- s->refcount = 1; ++ atomic_set(&s->refcount, 1); + #ifdef CONFIG_NUMA + s->remote_node_defrag_ratio = 1000; + #endif +@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s) + void kmem_cache_destroy(struct kmem_cache *s) + { + down_write(&slub_lock); +- s->refcount--; +- if (!s->refcount) { ++ if (atomic_dec_and_test(&s->refcount)) { + list_del(&s->list); + up_write(&slub_lock); + if (kmem_cache_close(s)) { +@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str) + __setup("slub_nomerge", setup_slub_nomerge); + + static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, +- const char *name, int size, gfp_t gfp_flags) ++ const char *name, int size, gfp_t gfp_flags, unsigned int flags) + { +- unsigned int flags = 0; +- + if (gfp_flags & SLUB_DMA) +- flags = SLAB_CACHE_DMA; ++ flags |= SLAB_CACHE_DMA; + + /* + * This function is called with IRQs disabled during early-boot on +@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) + EXPORT_SYMBOL(__kmalloc_node); + #endif + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct page *page; ++ struct kmem_cache *s = NULL; ++ unsigned long offset; ++ const char *type; ++ ++ if (!n) ++ return; ++ ++ type = "<null>"; ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ page = get_object_page(ptr); ++ ++ type = "<process stack>"; ++ if (!page) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ s = page->slab; ++ type = s->name; ++ if (!(s->flags & SLAB_USERCOPY)) ++ goto report; ++ ++ offset = (ptr - page_address(page)) % s->size; ++ if (offset <= s->objsize && n <= s->objsize - offset) ++ return; ++ 
++report: ++ pax_report_usercopy(ptr, n, to, type); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + size_t ksize(const void *object) + { + struct page *page; +@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void) + * kmem_cache_open for slab_state == DOWN. + */ + create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", +- sizeof(struct kmem_cache_node), GFP_NOWAIT); +- kmalloc_caches[0].refcount = -1; ++ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0); ++ atomic_set(&kmalloc_caches[0].refcount, -1); + caches++; + + hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); +@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void) + /* Caches that are not of the two-to-the-power-of size */ + if (KMALLOC_MIN_SIZE <= 32) { + create_kmalloc_cache(&kmalloc_caches[1], +- "kmalloc-96", 96, GFP_NOWAIT); ++ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY); + caches++; + } + if (KMALLOC_MIN_SIZE <= 64) { + create_kmalloc_cache(&kmalloc_caches[2], +- "kmalloc-192", 192, GFP_NOWAIT); ++ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY); + caches++; + } + + for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { + create_kmalloc_cache(&kmalloc_caches[i], +- "kmalloc", 1 << i, GFP_NOWAIT); ++ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY); + caches++; + } + +@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s) + /* + * We may have set a slab to be unmergeable during bootstrap. + */ +- if (s->refcount < 0) ++ if (atomic_read(&s->refcount) < 0) + return 1; + + return 0; +@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, + if (s) { + int cpu; + +- s->refcount++; ++ atomic_inc(&s->refcount); + /* + * Adjust the object sizes so that we clear + * the complete object on kzalloc. +@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, + + if (sysfs_slab_alias(s, name)) { + down_write(&slub_lock); +- s->refcount--; ++ atomic_dec(&s->refcount); + up_write(&slub_lock); + goto err; + } +@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor); + + static ssize_t aliases_show(struct kmem_cache *s, char *buf) + { +- return sprintf(buf, "%d\n", s->refcount - 1); ++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1); + } + SLAB_ATTR_RO(aliases); + +@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj) + kfree(s); + } + +-static struct sysfs_ops slab_sysfs_ops = { ++static const struct sysfs_ops slab_sysfs_ops = { + .show = slab_attr_show, + .store = slab_attr_store, + }; +@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj) + return 0; + } + +-static struct kset_uevent_ops slab_uevent_ops = { ++static const struct kset_uevent_ops slab_uevent_ops = { + .filter = uevent_filter, + }; + +@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s) + return name; + } + ++#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_add(struct kmem_cache *s) + { + int err; +@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s) + kobject_del(&s->kobj); + kobject_put(&s->kobj); + } ++#endif + + /* + * Need to buffer aliases during bootup until sysfs becomes +@@ -4632,6 +4677,7 @@ struct saved_alias { + + static struct saved_alias *alias_list; + ++#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_alias(struct kmem_cache *s, const char *name) + { + struct saved_alias *al; +@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) + 
alias_list = al; + return 0; + } ++#endif + + static int __init slab_sysfs_init(void) + { +@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = { + + static int __init slab_proc_init(void) + { +- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations); ++ mode_t gr_mode = S_IRUGO; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ ++ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations); + return 0; + } + module_init(slab_proc_init); +diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c +index d9714bd..ce08e28 100644 +--- a/mm/sparse-vmemmap.c ++++ b/mm/sparse-vmemmap.c +@@ -108,7 +108,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) + void *p = vmemmap_alloc_block(PAGE_SIZE, node); + if (!p) + return NULL; +- pud_populate(&init_mm, pud, p); ++ pud_populate_kernel(&init_mm, pud, p); + } + return pud; + } +@@ -120,7 +120,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) + void *p = vmemmap_alloc_block(PAGE_SIZE, node); + if (!p) + return NULL; +- pgd_populate(&init_mm, pgd, p); ++ pgd_populate_kernel(&init_mm, pgd, p); + } + return pgd; + } +diff --git a/mm/swap.c b/mm/swap.c +index 308e57d..5de19c0 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -30,6 +30,7 @@ + #include <linux/notifier.h> + #include <linux/backing-dev.h> + #include <linux/memcontrol.h> ++#include <linux/hugetlb.h> + + #include "internal.h" + +@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page) + compound_page_dtor *dtor; + + dtor = get_compound_page_dtor(page); ++ if (!PageHuge(page)) ++ BUG_ON(dtor != free_compound_page); + (*dtor)(page); + } + } +diff --git a/mm/util.c b/mm/util.c +index e48b493..24a601d 100644 +--- a/mm/util.c ++++ b/mm/util.c +@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user); + void arch_pick_mmap_layout(struct mm_struct *mm) + { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index f34ffd0..1a7ff39 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) + + pte = pte_offset_kernel(pmd, addr); + do { +- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); +- WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) { ++ BUG_ON(!pte_exec(*pte)); ++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); ++ continue; ++ } ++#endif ++ ++ { ++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); ++ WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ } + } while (pte++, addr += PAGE_SIZE, addr != end); + } + +@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, pgprot_t prot, struct page **pages, int *nr) + { + pte_t *pte; ++ int ret = -ENOMEM; + + /* + * nr is a running index into the array which helps higher level +@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, + pte = pte_alloc_kernel(pmd, addr); + if (!pte) + return -ENOMEM; ++ ++ pax_open_kernel(); + do { + struct page *page = pages[*nr]; + +- if (WARN_ON(!pte_none(*pte))) +- return -EBUSY; +- 
if (WARN_ON(!page)) +- return -ENOMEM; ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT); ++ else ++#endif ++ ++ if (WARN_ON(!pte_none(*pte))) { ++ ret = -EBUSY; ++ goto out; ++ } ++ if (WARN_ON(!page)) { ++ ret = -ENOMEM; ++ goto out; ++ } + set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); + (*nr)++; + } while (pte++, addr += PAGE_SIZE, addr != end); +- return 0; ++ ret = 0; ++out: ++ pax_close_kernel(); ++ return ret; + } + + static int vmap_pmd_range(pud_t *pud, unsigned long addr, +@@ -120,7 +147,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, + pmd_t *pmd; + unsigned long next; + +- pmd = pmd_alloc(&init_mm, pud, addr); ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -137,7 +164,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, + pud_t *pud; + unsigned long next; + +- pud = pud_alloc(&init_mm, pgd, addr); ++ pud = pud_alloc_kernel(&init_mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { +@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x) + * and fall back on vmalloc() if that fails. Others + * just put it in the vmalloc space. + */ +-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) ++#ifdef CONFIG_MODULES ++#ifdef MODULES_VADDR + unsigned long addr = (unsigned long)x; + if (addr >= MODULES_VADDR && addr < MODULES_END) + return 1; + #endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END) ++ return 1; ++#endif ++ ++#endif ++ + return is_vmalloc_addr(x); + } + +@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) + + if (!pgd_none(*pgd)) { + pud_t *pud = pud_offset(pgd, addr); ++#ifdef CONFIG_X86 ++ if (!pud_large(*pud)) ++#endif + if (!pud_none(*pud)) { + pmd_t *pmd = pmd_offset(pud, addr); ++#ifdef CONFIG_X86 ++ if (!pmd_large(*pmd)) ++#endif + if (!pmd_none(*pmd)) { + pte_t *ptep, pte; + +@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va) + struct rb_node *tmp; + + while (*p) { +- struct vmap_area *tmp; ++ struct vmap_area *varea; + + parent = *p; +- tmp = rb_entry(parent, struct vmap_area, rb_node); +- if (va->va_start < tmp->va_end) ++ varea = rb_entry(parent, struct vmap_area, rb_node); ++ if (va->va_start < varea->va_end) + p = &(*p)->rb_left; +- else if (va->va_end > tmp->va_start) ++ else if (va->va_end > varea->va_start) + p = &(*p)->rb_right; + else + BUG(); +@@ -326,6 +368,10 @@ static void purge_vmap_area_lazy(void); + static struct vmap_area *alloc_vmap_area(unsigned long size, + unsigned long align, + unsigned long vstart, unsigned long vend, ++ int node, gfp_t gfp_mask) __size_overflow(1); ++static struct vmap_area *alloc_vmap_area(unsigned long size, ++ unsigned long align, ++ unsigned long vstart, unsigned long vend, + int node, gfp_t gfp_mask) + { + struct vmap_area *va; +@@ -1245,6 +1291,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, + struct vm_struct *area; + + BUG_ON(in_interrupt()); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (flags & VM_KERNEXEC) { ++ if (start != VMALLOC_START || end != VMALLOC_END) ++ return NULL; ++ start = (unsigned long)MODULES_EXEC_VADDR; ++ end = (unsigned long)MODULES_EXEC_END; ++ } ++#endif ++ + if (flags & VM_IOREMAP) { + int bit = fls(size); + +@@ -1484,6 +1540,11 @@ void 
*vmap(struct page **pages, unsigned int count, + if (count > totalram_pages) + return NULL; + ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ flags |= VM_KERNEXEC; ++#endif ++ + area = get_vm_area_caller((count << PAGE_SHIFT), flags, + __builtin_return_address(0)); + if (!area) +@@ -1584,6 +1645,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) + */ + static void *__vmalloc_node(unsigned long size, unsigned long align, + gfp_t gfp_mask, pgprot_t prot, ++ int node, void *caller) __size_overflow(1); ++static void *__vmalloc_node(unsigned long size, unsigned long align, ++ gfp_t gfp_mask, pgprot_t prot, + int node, void *caller) + { + struct vm_struct *area; +@@ -1594,6 +1658,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align, + if (!size || (size >> PAGE_SHIFT) > totalram_pages) + return NULL; + ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC, ++ VMALLOC_START, VMALLOC_END, node, ++ gfp_mask, caller); ++ else ++#endif ++ + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST, + VMALLOC_START, VMALLOC_END, node, + gfp_mask, caller); +@@ -1698,10 +1770,9 @@ EXPORT_SYMBOL(vmalloc_node); + * For tight control over page level allocator and protection flags + * use __vmalloc() instead. + */ +- + void *vmalloc_exec(unsigned long size) + { +- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, ++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC, + -1, __builtin_return_address(0)); + } + +@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, + unsigned long uaddr = vma->vm_start; + unsigned long usize = vma->vm_end - vma->vm_start; + ++ BUG_ON(vma->vm_mirror); ++ + if ((PAGE_SIZE-1) & (unsigned long)addr) + return -EINVAL; + +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 42d76c6..5643dc4 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu) + * + * vm_stat contains the global counters + */ +-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + EXPORT_SYMBOL(vm_stat); + + #ifdef CONFIG_SMP +@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu) + v = p->vm_stat_diff[i]; + p->vm_stat_diff[i] = 0; + local_irq_restore(flags); +- atomic_long_add(v, &zone->vm_stat[i]); ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]); + global_diff[i] += v; + #ifdef CONFIG_NUMA + /* 3 seconds idle till flush */ +@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu) + + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + if (global_diff[i]) +- atomic_long_add(global_diff[i], &vm_stat[i]); ++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]); + } + + #endif +@@ -953,10 +953,20 @@ static int __init setup_vmstat(void) + start_cpu_timer(cpu); + #endif + #ifdef CONFIG_PROC_FS +- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); +- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); +- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); +- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); ++ { ++ mode_t gr_mode = S_IRUGO; ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations); ++ 
proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops); ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations); ++#else ++ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations); ++#endif ++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations); ++ } + #endif + return 0; + } +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c +index a29c5ab..6143f20 100644 +--- a/net/8021q/vlan.c ++++ b/net/8021q/vlan.c +@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) + err = -EPERM; + if (!capable(CAP_NET_ADMIN)) + break; +- if ((args.u.name_type >= 0) && +- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { ++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { + struct vlan_net *vn; + + vn = net_generic(net, vlan_net_id); +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c +index a2d2984..f9eb711 100644 +--- a/net/9p/trans_fd.c ++++ b/net/9p/trans_fd.c +@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len) + oldfs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos); ++ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos); + set_fs(oldfs); + + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) +diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c +index 02cc7e7..4514f1b 100644 +--- a/net/atm/atm_misc.c ++++ b/net/atm/atm_misc.c +@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize) + if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) + return 1; + atm_return(vcc,truesize); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return 0; + } + +@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, + } + } + atm_return(vcc,guess); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return NULL; + } + +@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp) + + void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to) + + void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +diff --git a/net/atm/lec.h b/net/atm/lec.h +index 9d14d19..5c145f3 100644 +--- a/net/atm/lec.h ++++ b/net/atm/lec.h +@@ -48,7 +48,7 @@ struct lane2_ops { + const u8 *tlvs, u32 sizeoftlvs); + void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr, + const u8 *tlvs, u32 sizeoftlvs); +-}; ++} __no_const; + + /* + * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType +diff --git a/net/atm/mpc.h b/net/atm/mpc.h +index 0919a88..a23d54e 100644 +--- a/net/atm/mpc.h ++++ b/net/atm/mpc.h +@@ -33,7 +33,7 @@ struct mpoa_client { + struct mpc_parameters parameters; /* parameters for this client */ + + const struct net_device_ops *old_ops; +- struct net_device_ops new_ops; ++ net_device_ops_no_const new_ops; + }; + + +diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c +index 4504a4b..1733f1e 100644 +--- 
a/net/atm/mpoa_caches.c ++++ b/net/atm/mpoa_caches.c +@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client) + struct timeval now; + struct k_message msg; + ++ pax_track_stack(); ++ + do_gettimeofday(&now); + + write_lock_irq(&client->egress_lock); +diff --git a/net/atm/proc.c b/net/atm/proc.c +index ab8419a..aa91497 100644 +--- a/net/atm/proc.c ++++ b/net/atm/proc.c +@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal, + const struct k_atm_aal_stats *stats) + { + seq_printf(seq, "%s ( %d %d %d %d %d )", aal, +- atomic_read(&stats->tx),atomic_read(&stats->tx_err), +- atomic_read(&stats->rx),atomic_read(&stats->rx_err), +- atomic_read(&stats->rx_drop)); ++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err), ++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err), ++ atomic_read_unchecked(&stats->rx_drop)); + } + + static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) +@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc) + { + struct sock *sk = sk_atm(vcc); + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(seq, "%p ", NULL); ++#else + seq_printf(seq, "%p ", vcc); ++#endif ++ + if (!vcc->dev) + seq_printf(seq, "Unassigned "); + else +@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) + { + if (!vcc->dev) + seq_printf(seq, sizeof(void *) == 4 ? ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ "N/A@%p%10s" : "N/A@%p%2s", NULL, ""); ++#else + "N/A@%p%10s" : "N/A@%p%2s", vcc, ""); ++#endif + else + seq_printf(seq, "%3d %3d %5d ", + vcc->dev->number, vcc->vpi, vcc->vci); +diff --git a/net/atm/resources.c b/net/atm/resources.c +index 56b7322..c48b84e 100644 +--- a/net/atm/resources.c ++++ b/net/atm/resources.c +@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev) + static void copy_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from, + static void subtract_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i) + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h +index 8567d47..bba2292 100644 +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port); + + #ifdef CONFIG_SYSFS + /* br_sysfs_if.c */ +-extern struct sysfs_ops brport_sysfs_ops; ++extern const struct sysfs_ops brport_sysfs_ops; + extern int br_sysfs_addif(struct net_bridge_port *p); + + /* br_sysfs_br.c */ +diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c +index 9a52ac5..c97538e 100644 +--- a/net/bridge/br_stp_if.c ++++ b/net/bridge/br_stp_if.c +@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br) + char *envp[] = { NULL }; + + if (br->stp_enabled == BR_USER_STP) { +- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1); ++ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); + printk(KERN_INFO "%s: userspace STP stopped, return code %d\n", + br->dev->name, r); + +diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c +index 820643a..ce77fb3 100644 +--- a/net/bridge/br_sysfs_if.c 
++++ b/net/bridge/br_sysfs_if.c +@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj, + return ret; + } + +-struct sysfs_ops brport_sysfs_ops = { ++const struct sysfs_ops brport_sysfs_ops = { + .show = brport_show, + .store = brport_store, + }; +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index d73d47f..72df42a 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user, + unsigned int entries_size, nentries; + char *entries; + ++ pax_track_stack(); ++ + if (cmd == EBT_SO_GET_ENTRIES) { + entries_size = t->private->entries_size; + nentries = t->private->nentries; +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 2ffd2e0..72a7486 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v) + struct bcm_sock *bo = bcm_sk(sk); + struct bcm_op *op; + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, ">>> socket %p", NULL); ++ seq_printf(m, " / sk %p", NULL); ++ seq_printf(m, " / bo %p", NULL); ++#else + seq_printf(m, ">>> socket %p", sk->sk_socket); + seq_printf(m, " / sk %p", sk); + seq_printf(m, " / bo %p", bo); ++#endif + seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); + seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); + seq_printf(m, " <<<\n"); +diff --git a/net/compat.c b/net/compat.c +index 9559afc..ccd74e1 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) + __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || + __get_user(kmsg->msg_flags, &umsg->msg_flags)) + return -EFAULT; +- kmsg->msg_name = compat_ptr(tmp1); +- kmsg->msg_iov = compat_ptr(tmp2); +- kmsg->msg_control = compat_ptr(tmp3); ++ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1); ++ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2); ++ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3); + return 0; + } + +@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + kern_msg->msg_name = NULL; + + tot_len = iov_from_user_compat_to_kern(kern_iov, +- (struct compat_iovec __user *)kern_msg->msg_iov, ++ (struct compat_iovec __force_user *)kern_msg->msg_iov, + kern_msg->msg_iovlen); + if (tot_len >= 0) + kern_msg->msg_iov = kern_iov; +@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + + #define CMSG_COMPAT_FIRSTHDR(msg) \ + (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? 
\ +- (struct compat_cmsghdr __user *)((msg)->msg_control) : \ ++ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \ + (struct compat_cmsghdr __user *)NULL) + + #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \ + ((ucmlen) >= sizeof(struct compat_cmsghdr) && \ + (ucmlen) <= (unsigned long) \ + ((mhdr)->msg_controllen - \ +- ((char *)(ucmsg) - (char *)(mhdr)->msg_control))) ++ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control))) + + static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg, + struct compat_cmsghdr __user *cmsg, int cmsg_len) + { + char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len); +- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) > ++ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) > + msg->msg_controllen) + return NULL; + return (struct compat_cmsghdr __user *)ptr; +@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat + { + struct compat_timeval ctv; + struct compat_timespec cts[3]; +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; + struct compat_cmsghdr cmhdr; + int cmlen; + +@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat + + void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) + { +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; + int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int); + int fdnum = scm->fp->count; + struct file **fp = scm->fp->fp; +@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname, + len = sizeof(ktime); + old_fs = get_fs(); + set_fs(KERNEL_DS); +- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len); ++ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len); + set_fs(old_fs); + + if (!err) { +@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + case MCAST_JOIN_GROUP: + case MCAST_LEAVE_GROUP: + { +- struct compat_group_req __user *gr32 = (void *)optval; ++ struct compat_group_req __user *gr32 = (void __user *)optval; + struct group_req __user *kgr = + compat_alloc_user_space(sizeof(struct group_req)); + u32 interface; +@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + case MCAST_BLOCK_SOURCE: + case MCAST_UNBLOCK_SOURCE: + { +- struct compat_group_source_req __user *gsr32 = (void *)optval; ++ struct compat_group_source_req __user *gsr32 = (void __user *)optval; + struct group_source_req __user *kgsr = compat_alloc_user_space( + sizeof(struct group_source_req)); + u32 interface; +@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + } + case MCAST_MSFILTER: + { +- struct compat_group_filter __user *gf32 = (void *)optval; ++ struct compat_group_filter __user *gf32 = (void __user *)optval; + struct group_filter __user *kgf; + u32 interface, fmode, numsrc; + +diff --git a/net/core/dev.c b/net/core/dev.c +index 84a0705..575db4c 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name) + if (no_module && capable(CAP_NET_ADMIN)) + no_module = request_module("netdev-%s", 
name); + if (no_module && capable(CAP_SYS_MODULE)) { ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ ___request_module(true, "grsec_modharden_netdev", "%s", name); ++#else + if (!request_module("%s", name)) + pr_err("Loading kernel module for a network device " + "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " + "instead\n", name); ++#endif + } + } + EXPORT_SYMBOL(dev_load); +@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) + + struct dev_gso_cb { + void (*destructor)(struct sk_buff *skb); +-}; ++} __no_const; + + #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) + +@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb) + } + EXPORT_SYMBOL(netif_rx_ni); + +-static void net_tx_action(struct softirq_action *h) ++static void net_tx_action(void) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); + +@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi) + EXPORT_SYMBOL(netif_napi_del); + + +-static void net_rx_action(struct softirq_action *h) ++static void net_rx_action(void) + { + struct list_head *list = &__get_cpu_var(softnet_data).poll_list; + unsigned long time_limit = jiffies + 2; +diff --git a/net/core/flow.c b/net/core/flow.c +index 9601587..8c4824e 100644 +--- a/net/core/flow.c ++++ b/net/core/flow.c +@@ -35,11 +35,11 @@ struct flow_cache_entry { + atomic_t *object_ref; + }; + +-atomic_t flow_cache_genid = ATOMIC_INIT(0); ++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0); + + static u32 flow_hash_shift; + #define flow_hash_size (1 << flow_hash_shift) +-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; ++static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables); + + #define flow_table(cpu) (per_cpu(flow_tables, cpu)) + +@@ -52,7 +52,7 @@ struct flow_percpu_info { + u32 hash_rnd; + int count; + }; +-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 }; ++static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info); + + #define flow_hash_rnd_recalc(cpu) \ + (per_cpu(flow_hash_info, cpu).hash_rnd_recalc) +@@ -69,7 +69,7 @@ struct flow_flush_info { + atomic_t cpuleft; + struct completion completion; + }; +-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL }; ++static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets); + + #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu)) + +@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, + if (fle->family == family && + fle->dir == dir && + flow_key_compare(key, &fle->key) == 0) { +- if (fle->genid == atomic_read(&flow_cache_genid)) { ++ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) { + void *ret = fle->object; + + if (ret) +@@ -228,7 +228,7 @@ nocache: + err = resolver(net, key, family, dir, &obj, &obj_ref); + + if (fle && !err) { +- fle->genid = atomic_read(&flow_cache_genid); ++ fle->genid = atomic_read_unchecked(&flow_cache_genid); + + if (fle->object) + atomic_dec(fle->object_ref); +@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data) + + fle = flow_table(cpu)[i]; + for (; fle; fle = fle->next) { +- unsigned genid = atomic_read(&flow_cache_genid); ++ unsigned genid = atomic_read_unchecked(&flow_cache_genid); + + if (!fle->object || fle->genid == genid) + continue; +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index d4fd895..ac9b1e6 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -57,7 +57,7 @@ struct rtnl_link + { + 
rtnl_doit_func doit; + rtnl_dumpit_func dumpit; +-}; ++} __no_const; + + static DEFINE_MUTEX(rtnl_mutex); + +diff --git a/net/core/scm.c b/net/core/scm.c +index d98eafc..1a190a9 100644 +--- a/net/core/scm.c ++++ b/net/core/scm.c +@@ -191,7 +191,7 @@ error: + int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) + { + struct cmsghdr __user *cm +- = (__force struct cmsghdr __user *)msg->msg_control; ++ = (struct cmsghdr __force_user *)msg->msg_control; + struct cmsghdr cmhdr; + int cmlen = CMSG_LEN(len); + int err; +@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) + err = -EFAULT; + if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) + goto out; +- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr))) ++ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr))) + goto out; + cmlen = CMSG_SPACE(len); + if (msg->msg_controllen < cmlen) +@@ -229,7 +229,7 @@ out: + void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) + { + struct cmsghdr __user *cm +- = (__force struct cmsghdr __user*)msg->msg_control; ++ = (struct cmsghdr __force_user *)msg->msg_control; + + int fdmax = 0; + int fdnum = scm->fp->count; +@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) + if (fdnum < fdmax) + fdmax = fdnum; + +- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; ++ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax; + i++, cmfptr++) + { + int new_fd; +diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c +index 45329d7..626aaa6 100644 +--- a/net/core/secure_seq.c ++++ b/net/core/secure_seq.c +@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, + EXPORT_SYMBOL(secure_tcpv6_sequence_number); + + u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +- __be16 dport) ++ __be16 dport) + { + u32 secret[MD5_MESSAGE_BYTES / 4]; + u32 hash[MD5_DIGEST_WORDS]; +@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, + secret[i] = net_secret[i]; + + md5_transform(hash, secret); +- + return hash[0]; + } + #endif +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 025f924..a014894 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, + struct sk_buff *frag_iter; + struct sock *sk = skb->sk; + ++ pax_track_stack(); ++ + /* + * __skb_splice_bits() only fails if the output has no room left, + * so no point in going over the frag_list for the error case. 
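
The flow-cache and statistics hunks above follow the same pattern as the vm_stat, sk_drops, and ATM stats conversions earlier in this patch: plain atomic_t counters become atomic_unchecked_t. Under PaX's REFCOUNT hardening, the ordinary atomic_inc()/atomic_add() helpers are instrumented to trap on overflow, so counters and generation numbers that may legitimately wrap are moved to an uninstrumented twin type with the same storage layout. A minimal userspace sketch of the idea, using GCC builtins — illustrative only, not the actual PaX implementation:

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	/* plain read: no overflow instrumentation */
	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return v->counter;
	}

	/* increment that is allowed to wrap silently, unlike the
	 * REFCOUNT-checked atomic_inc() used for object refcounts */
	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		__sync_fetch_and_add(&v->counter, 1);
	}

The split keeps overflow detection armed on true reference counts while exempting values such as flow_cache_genid and per-socket drop statistics, where wrap-around is harmless by design.
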
+@@ -2989,6 +2991,8 @@ static void sock_rmem_free(struct sk_buff *skb) + */ + int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) + { ++ int len = skb->len; ++ + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= + (unsigned)sk->sk_rcvbuf) + return -ENOMEM; +@@ -3000,7 +3004,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) + + skb_queue_tail(&sk->sk_error_queue, skb); + if (!sock_flag(sk, SOCK_DEAD)) +- sk->sk_data_ready(sk, skb->len); ++ sk->sk_data_ready(sk, len); + return 0; + } + EXPORT_SYMBOL(sock_queue_err_skb); +diff --git a/net/core/sock.c b/net/core/sock.c +index 6605e75..3acebda 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname, + break; + + case SO_PEERCRED: ++ { ++ struct ucred peercred; + if (len > sizeof(sk->sk_peercred)) + len = sizeof(sk->sk_peercred); +- if (copy_to_user(optval, &sk->sk_peercred, len)) ++ peercred = sk->sk_peercred; ++ if (copy_to_user(optval, &peercred, len)) + return -EFAULT; + goto lenout; ++ } + + case SO_PEERNAME: + { +@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) + */ + smp_wmb(); + atomic_set(&sk->sk_refcnt, 1); +- atomic_set(&sk->sk_drops, 0); ++ atomic_set_unchecked(&sk->sk_drops, 0); + } + EXPORT_SYMBOL(sock_init_data); + +diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c +index 2036568..c55883d 100644 +--- a/net/decnet/sysctl_net_decnet.c ++++ b/net/decnet/sysctl_net_decnet.c +@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write, + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, addr, len)) ++ if (len > sizeof addr || copy_to_user(buffer, addr, len)) + return -EFAULT; + + *lenp = len; +@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write, + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, devname, len)) ++ if (len > sizeof devname || copy_to_user(buffer, devname, len)) + return -EFAULT; + + *lenp = len; +diff --git a/net/econet/Kconfig b/net/econet/Kconfig +index 39a2d29..f39c0fe 100644 +--- a/net/econet/Kconfig ++++ b/net/econet/Kconfig +@@ -4,7 +4,7 @@ + + config ECONET + tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)" +- depends on EXPERIMENTAL && INET ++ depends on EXPERIMENTAL && INET && BROKEN + ---help--- + Econet is a fairly old and slow networking protocol mainly used by + Acorn computers to access file and print servers. 
It uses native +diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c +index a413b1b..380849c 100644 +--- a/net/ieee802154/dgram.c ++++ b/net/ieee802154/dgram.c +@@ -318,7 +318,7 @@ out: + static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) + { + if (sock_queue_rcv_skb(sk, skb) < 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c +index 30e74ee..bfc6ee0 100644 +--- a/net/ieee802154/raw.c ++++ b/net/ieee802154/raw.c +@@ -206,7 +206,7 @@ out: + static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) + { + if (sock_queue_rcv_skb(sk, skb) < 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c +index dba56d2..acee5d6 100644 +--- a/net/ipv4/inet_diag.c ++++ b/net/ipv4/inet_diag.c +@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk, + r->idiag_retrans = 0; + + r->id.idiag_if = sk->sk_bound_dev_if; ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ r->id.idiag_cookie[0] = 0; ++ r->id.idiag_cookie[1] = 0; ++#else + r->id.idiag_cookie[0] = (u32)(unsigned long)sk; + r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); ++#endif + + r->id.idiag_sport = inet->sport; + r->id.idiag_dport = inet->dport; +@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, + r->idiag_family = tw->tw_family; + r->idiag_retrans = 0; + r->id.idiag_if = tw->tw_bound_dev_if; ++ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ r->id.idiag_cookie[0] = 0; ++ r->id.idiag_cookie[1] = 0; ++#else + r->id.idiag_cookie[0] = (u32)(unsigned long)tw; + r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); ++#endif ++ + r->id.idiag_sport = tw->tw_sport; + r->id.idiag_dport = tw->tw_dport; + r->id.idiag_src[0] = tw->tw_rcv_saddr; +@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, + if (sk == NULL) + goto unlock; + ++#ifndef CONFIG_GRKERNSEC_HIDESYM + err = -ESTALE; + if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE || + req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) && + ((u32)(unsigned long)sk != req->id.idiag_cookie[0] || + (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1])) + goto out; ++#endif + + err = -ENOMEM; + rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) + +@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, + r->idiag_retrans = req->retrans; + + r->id.idiag_if = sk->sk_bound_dev_if; ++ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ r->id.idiag_cookie[0] = 0; ++ r->id.idiag_cookie[1] = 0; ++#else + r->id.idiag_cookie[0] = (u32)(unsigned long)req; + r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1); ++#endif + + tmo = req->expires - jiffies; + if (tmo < 0) +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c +index d717267..56de7e7 100644 +--- a/net/ipv4/inet_hashtables.c ++++ b/net/ipv4/inet_hashtables.c +@@ -18,12 +18,15 @@ + #include <linux/sched.h> + #include <linux/slab.h> + #include <linux/wait.h> ++#include <linux/security.h> + + #include <net/inet_connection_sock.h> + #include <net/inet_hashtables.h> + #include <net/secure_seq.h> + #include <net/ip.h> + ++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet); ++ + /* + * Allocate and initialize a new local port bind bucket. + * The bindhash mutex for snum's hash chain must be held here. 
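
The inet_diag cookie hunks above, and the many seq_printf() hunks elsewhere in this section (bcm, raw, tcp, udp, key sockets), all apply the same GRKERNSEC_HIDESYM rule: kernel object addresses that were previously printed into /proc or netlink replies are replaced with NULL (or zeroed cookies) so unprivileged readers cannot harvest heap addresses. The patch open-codes the #ifdef at every call site; a hypothetical helper capturing the pattern might look like the following — a sketch only, this helper does not exist in the patch:

	static inline const void *hide_ptr(const void *p)
	{
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		return NULL;	/* never leak kernel addresses to userspace */
	#else
		return p;
	#endif
	}

	/* e.g. seq_printf(m, ">>> socket %p", hide_ptr(sk->sk_socket)); */
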
+@@ -491,6 +494,8 @@ ok: + } + spin_unlock(&head->lock); + ++ gr_update_task_in_ip_table(current, inet_sk(sk)); ++ + if (tw) { + inet_twsk_deschedule(tw, death_row); + inet_twsk_put(tw); +diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c +index 13b229f..6956484 100644 +--- a/net/ipv4/inetpeer.c ++++ b/net/ipv4/inetpeer.c +@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) + struct inet_peer *p, *n; + struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr; + ++ pax_track_stack(); ++ + /* Look up for the address quickly. */ + read_lock_bh(&peer_pool_lock); + p = lookup(daddr, NULL); +@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) + return NULL; + n->v4daddr = daddr; + atomic_set(&n->refcnt, 1); +- atomic_set(&n->rid, 0); ++ atomic_set_unchecked(&n->rid, 0); + n->ip_id_count = secure_ip_id(daddr); + n->tcp_ts_stamp = 0; + +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index d3fe10b..feeafc9 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp) + return 0; + + start = qp->rid; +- end = atomic_inc_return(&peer->rid); ++ end = atomic_inc_return_unchecked(&peer->rid); + qp->rid = end; + + rc = qp->q.fragments && (end - start) > max; +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c +index e982b5c..f079d75 100644 +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, + int val; + int len; + ++ pax_track_stack(); ++ + if (level != SOL_IP) + return -EOPNOTSUPP; + +@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, + if (sk->sk_type != SOCK_STREAM) + return -ENOPROTOOPT; + +- msg.msg_control = optval; ++ msg.msg_control = (void __force_kernel *)optval; + msg.msg_controllen = len; + msg.msg_flags = 0; + +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c +index f8d04c2..c1188f2 100644 +--- a/net/ipv4/ipconfig.c ++++ b/net/ipv4/ipconfig.c +@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg); ++ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); + set_fs(oldfs); + return res; + } +@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg); ++ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); + set_fs(oldfs); + return res; + } +@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg); ++ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg); + set_fs(oldfs); + return res; + } +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index c8b0cc3..4da5ae2 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) + private = &tmp; + } + #endif ++ memset(&info, 0, sizeof(info)); + info.valid_hooks = t->valid_hooks; + memcpy(info.hook_entry, private->hook_entry, + sizeof(info.hook_entry)); +diff --git a/net/ipv4/netfilter/ip_queue.c 
b/net/ipv4/netfilter/ip_queue.c +index c156db2..e772975 100644 +--- a/net/ipv4/netfilter/ip_queue.c ++++ b/net/ipv4/netfilter/ip_queue.c +@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e) + + if (v->data_len < sizeof(*user_iph)) + return 0; ++ if (v->data_len > 65535) ++ return -EMSGSIZE; ++ + diff = v->data_len - e->skb->len; + if (diff < 0) { + if (pskb_trim(e->skb, v->data_len)) +@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex) + static inline void + __ipq_rcv_skb(struct sk_buff *skb) + { +- int status, type, pid, flags, nlmsglen, skblen; ++ int status, type, pid, flags; ++ unsigned int nlmsglen, skblen; + struct nlmsghdr *nlh; + + skblen = skb->len; +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index 0606db1..02e7e4c 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) + private = &tmp; + } + #endif ++ memset(&info, 0, sizeof(info)); + info.valid_hooks = t->valid_hooks; + memcpy(info.hook_entry, private->hook_entry, + sizeof(info.hook_entry)); +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c +index ab996f9..3da5f96 100644 +--- a/net/ipv4/raw.c ++++ b/net/ipv4/raw.c +@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) + /* Charge it to the socket. */ + + if (sock_queue_rcv_skb(sk, skb) < 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) + int raw_rcv(struct sock *sk, struct sk_buff *skb) + { + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk) + + static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) + { ++ struct icmp_filter filter; ++ ++ if (optlen < 0) ++ return -EINVAL; + if (optlen > sizeof(struct icmp_filter)) + optlen = sizeof(struct icmp_filter); +- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) ++ if (copy_from_user(&filter, optval, optlen)) + return -EFAULT; ++ raw_sk(sk)->filter = filter; ++ + return 0; + } + + static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) + { + int len, ret = -EFAULT; ++ struct icmp_filter filter; + + if (get_user(len, optlen)) + goto out; +@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o + if (len > sizeof(struct icmp_filter)) + len = sizeof(struct icmp_filter); + ret = -EFAULT; +- if (put_user(len, optlen) || +- copy_to_user(optval, &raw_sk(sk)->filter, len)) ++ filter = raw_sk(sk)->filter; ++ if (put_user(len, optlen) || len > sizeof filter || ++ copy_to_user(optval, &filter, len)) + goto out; + ret = 0; + out: return ret; +@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops)); + } + + static int raw_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 58f141b..b759702 100644 +--- a/net/ipv4/route.c ++++ 
b/net/ipv4/route.c +@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, + + static inline int rt_genid(struct net *net) + { +- return atomic_read(&net->ipv4.rt_genid); ++ return atomic_read_unchecked(&net->ipv4.rt_genid); + } + + #ifdef CONFIG_PROC_FS +@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net) + unsigned char shuffle; + + get_random_bytes(&shuffle, sizeof(shuffle)); +- atomic_add(shuffle + 1U, &net->ipv4.rt_genid); ++ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid); + } + + /* +@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = { + + static __net_init int rt_secret_timer_init(struct net *net) + { +- atomic_set(&net->ipv4.rt_genid, ++ atomic_set_unchecked(&net->ipv4.rt_genid, + (int) ((num_physpages ^ (num_physpages>>8)) ^ + (jiffies ^ (jiffies >> 7)))); + +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index f095659..537313b 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -838,8 +838,7 @@ new_segment: + wait_for_sndbuf: + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + wait_for_memory: +- if (copied) +- tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); ++ tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); + + if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) + goto do_error; +@@ -2085,6 +2084,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, + int val; + int err = 0; + ++ pax_track_stack(); ++ + /* This is a string value all the others are int's */ + if (optname == TCP_CONGESTION) { + char name[TCP_CA_NAME_MAX]; +@@ -2355,6 +2356,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level, + struct tcp_sock *tp = tcp_sk(sk); + int val, len; + ++ pax_track_stack(); ++ + if (get_user(len, optlen)) + return -EFAULT; + +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 6fc7961..33bad4a 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -85,6 +85,9 @@ + int sysctl_tcp_tw_reuse __read_mostly; + int sysctl_tcp_low_latency __read_mostly; + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif + + #ifdef CONFIG_TCP_MD5SIG + static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, +@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + tcp_v4_send_reset(rsk, skb); + discard: + kfree_skb(skb); +@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb) + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; ++ } + + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) + goto discard_and_relse; +@@ -1651,6 +1665,10 @@ no_tcp_socket: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v4_send_reset(NULL, skb); + } + +@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req, + 0, /* non standard timer */ + 0, /* open_requests have no inode */ + atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + req, ++#endif + len); + } + +@@ -2280,7 +2302,12 @@ 
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) + sock_i_uid(sk), + icsk->icsk_probes_out, + sock_i_ino(sk), +- atomic_read(&sk->sk_refcnt), sk, ++ atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sk, ++#endif + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, +@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw, + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n", + i, src, srcp, dest, destp, tw->tw_substate, 0, 0, + 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, +- atomic_read(&tw->tw_refcnt), tw, len); ++ atomic_read(&tw->tw_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ tw, ++#endif ++ len); + } + + #define TMPSZ 150 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index 4c03598..e09a8e8 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -26,6 +26,10 @@ + #include <net/inet_common.h> + #include <net/xfrm.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + #ifdef CONFIG_SYSCTL + #define SYNC_INIT 0 /* let the user enable it */ + #else +@@ -672,6 +676,10 @@ listen_overflow: + + embryonic_reset: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); ++ ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + if (!(flg & TCP_FLAG_RST)) + req->rsk_ops->send_reset(sk, skb); + +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index af83bdf..ec91cb2 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, + __u8 *md5_hash_location; + int mss; + ++ pax_track_stack(); ++ + skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); + if (skb == NULL) + return NULL; +diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c +index 59f5b5e..193860f 100644 +--- a/net/ipv4/tcp_probe.c ++++ b/net/ipv4/tcp_probe.c +@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, + if (cnt + width >= len) + break; + +- if (copy_to_user(buf + cnt, tbuf, width)) ++ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width)) + return -EFAULT; + cnt += width; + } +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index 57d5501..a9ed13a 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -21,6 +21,10 @@ + #include <linux/module.h> + #include <net/tcp.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_lastack_retries; ++#endif ++ + int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; + int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES; + int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME; +@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk) + } + } + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if ((sk->sk_state == TCP_LAST_ACK) && ++ (grsec_lastack_retries > 0) && ++ (grsec_lastack_retries < retry_until)) ++ retry_until = grsec_lastack_retries; ++#endif ++ + if (retransmits_timed_out(sk, retry_until)) { + /* Has it gone just too far? 
*/ + tcp_write_err(sk); +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 8e28770..72105c8 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -86,6 +86,7 @@ + #include <linux/types.h> + #include <linux/fcntl.h> + #include <linux/module.h> ++#include <linux/security.h> + #include <linux/socket.h> + #include <linux/sockios.h> + #include <linux/igmp.h> +@@ -106,6 +107,10 @@ + #include <net/xfrm.h> + #include "udp_impl.h" + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + struct udp_table udp_table; + EXPORT_SYMBOL(udp_table); + +@@ -371,6 +376,9 @@ found: + return s; + } + ++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb); ++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr); ++ + /* + * This routine is called by the ICMP module when it gets some + * sort of error condition. If err < 0 then the socket should +@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + dport = usin->sin_port; + if (dport == 0) + return -EINVAL; ++ ++ err = gr_search_udp_sendmsg(sk, usin); ++ if (err) ++ return err; + } else { + if (sk->sk_state != TCP_ESTABLISHED) + return -EDESTADDRREQ; ++ ++ err = gr_search_udp_sendmsg(sk, NULL); ++ if (err) ++ return err; ++ + daddr = inet->daddr; + dport = inet->dport; + /* Open fast path for connected socket. +@@ -945,6 +962,10 @@ try_again: + if (!skb) + goto out; + ++ err = gr_search_udp_recvmsg(sk, skb); ++ if (err) ++ goto out_free; ++ + ulen = skb->len - sizeof(struct udphdr); + copied = len; + if (copied > ulen) +@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + if (rc == -ENOMEM) { + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, + is_udplite); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + } + goto drop; + } +@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, + goto csum_error; + + UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); + + /* +@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops), len); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops), len); + } + + int udp4_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 8ac3d09..fc58c5f 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg) + p.iph.ihl = 5; + p.iph.protocol = IPPROTO_IPV6; + p.iph.ttl = 64; +- ifr.ifr_ifru.ifru_data = (__force void __user *)&p; ++ ifr.ifr_ifru.ifru_data = (void __force_user *)&p; + + if (ops->ndo_do_ioctl) { + mm_segment_t oldfs = get_fs(); +diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c +index cc4797d..7cfdfcc 100644 +--- a/net/ipv6/inet6_connection_sock.c ++++ b/net/ipv6/inet6_connection_sock.c +@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, + #ifdef CONFIG_XFRM + { + struct rt6_info *rt = (struct rt6_info *)dst; +- rt->rt6i_flow_cache_genid = 
atomic_read(&flow_cache_genid); ++ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid); + } + #endif + } +@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) + #ifdef CONFIG_XFRM + if (dst) { + struct rt6_info *rt = (struct rt6_info *)dst; +- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { ++ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) { + sk->sk_dst_cache = NULL; + dst_release(dst); + dst = NULL; +diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c +index 093e9b2..f72cddb 100644 +--- a/net/ipv6/inet6_hashtables.c ++++ b/net/ipv6/inet6_hashtables.c +@@ -119,7 +119,7 @@ out: + } + EXPORT_SYMBOL(__inet6_lookup_established); + +-static int inline compute_score(struct sock *sk, struct net *net, ++static inline int compute_score(struct sock *sk, struct net *net, + const unsigned short hnum, + const struct in6_addr *daddr, + const int dif) +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 4f7aaf6..f7acf45 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, + int val, valbool; + int retv = -ENOPROTOOPT; + ++ pax_track_stack(); ++ + if (optval == NULL) + val=0; + else { +@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, + int len; + int val; + ++ pax_track_stack(); ++ + if (ip6_mroute_opt(optname)) + return ip6_mroute_getsockopt(sk, optname, optval, optlen); + +@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, + if (sk->sk_type != SOCK_STREAM) + return -ENOPROTOOPT; + +- msg.msg_control = optval; ++ msg.msg_control = (void __force_kernel *)optval; + msg.msg_controllen = len; + msg.msg_flags = 0; + +diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c +index 1cf3f0c..1d4376f 100644 +--- a/net/ipv6/netfilter/ip6_queue.c ++++ b/net/ipv6/netfilter/ip6_queue.c +@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e) + + if (v->data_len < sizeof(*user_iph)) + return 0; ++ if (v->data_len > 65535) ++ return -EMSGSIZE; ++ + diff = v->data_len - e->skb->len; + if (diff < 0) { + if (pskb_trim(e->skb, v->data_len)) +@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex) + static inline void + __ipq_rcv_skb(struct sk_buff *skb) + { +- int status, type, pid, flags, nlmsglen, skblen; ++ int status, type, pid, flags; ++ unsigned int nlmsglen, skblen; + struct nlmsghdr *nlh; + + skblen = skb->len; +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index 78b5a36..7f37433 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) + private = &tmp; + } + #endif ++ memset(&info, 0, sizeof(info)); + info.valid_hooks = t->valid_hooks; + memcpy(info.hook_entry, private->hook_entry, + sizeof(info.hook_entry)); +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 4f24570..b813b34 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) + { + if ((raw6_sk(sk)->checksum || sk->sk_filter) && + skb_checksum_complete(skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } + + /* Charge it to the socket. 
*/ + if (sock_queue_rcv_skb(sk,skb)<0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) + struct raw6_sock *rp = raw6_sk(sk); + + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) + + if (inet->hdrincl) { + if (skb_checksum_complete(skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -518,7 +518,7 @@ csum_copy_err: + as some normal condition. + */ + err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + goto out; + } + +@@ -600,7 +600,7 @@ out: + return err; + } + +-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, ++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length, + struct flowi *fl, struct rt6_info *rt, + unsigned int flags) + { +@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, + u16 proto; + int err; + ++ pax_track_stack(); ++ + /* Rough check on arithmetic overflow, + better check is made in ip6_append_data(). + */ +@@ -916,12 +918,17 @@ do_confirm: + static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, + char __user *optval, int optlen) + { ++ struct icmp6_filter filter; ++ + switch (optname) { + case ICMPV6_FILTER: ++ if (optlen < 0) ++ return -EINVAL; + if (optlen > sizeof(struct icmp6_filter)) + optlen = sizeof(struct icmp6_filter); +- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) ++ if (copy_from_user(&filter, optval, optlen)) + return -EFAULT; ++ raw6_sk(sk)->filter = filter; + return 0; + default: + return -ENOPROTOOPT; +@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen) + { + int len; ++ struct icmp6_filter filter; + + switch (optname) { + case ICMPV6_FILTER: +@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, + len = sizeof(struct icmp6_filter); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) ++ filter = raw6_sk(sk)->filter; ++ if (len > sizeof filter || copy_to_user(optval, &filter, len)) + return -EFAULT; + return 0; + default: +@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) + 0, 0L, 0, + sock_i_uid(sp), 0, + sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops)); + } + + static int raw6_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index faae6df..d4430c1 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, + } + #endif + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + static void tcp_v6_hash(struct sock *sk) + { + if (sk->sk_state != TCP_CLOSE) { +@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) 
++#endif + tcp_v6_send_reset(sk, skb); + discard: + if (opt_skb) +@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb) + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; ++ } + + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) + goto discard_and_relse; +@@ -1701,6 +1716,10 @@ no_tcp_socket: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v6_send_reset(NULL, skb); + } + +@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq, + uid, + 0, /* non standard timer */ + 0, /* open_requests have no inode */ +- 0, req); ++ 0, ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL ++#else ++ req ++#endif ++ ); + } + + static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) +@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) + sock_i_uid(sp), + icsk->icsk_probes_out, + sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, +@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq, + dest->s6_addr32[2], dest->s6_addr32[3], destp, + tw->tw_substate, 0, 0, + 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, +- atomic_read(&tw->tw_refcnt), tw); ++ atomic_read(&tw->tw_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL ++#else ++ tw ++#endif ++ ); + } + + static int tcp6_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 9cc6289..052c521 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -49,6 +49,10 @@ + #include <linux/seq_file.h> + #include "udp_impl.h" + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) + { + const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; +@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) + if (rc == -ENOMEM) { + UDP6_INC_STATS_BH(sock_net(sk), + UDP_MIB_RCVBUFERRORS, is_udplite); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + } + goto drop; + } +@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, + UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, + proto == IPPROTO_UDPLITE); + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); + + kfree_skb(skb); +@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket + 0, 0L, 0, + sock_i_uid(sp), 0, + sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops)); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops)); + } + + int udp6_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv6/xfrm6_tunnel.c 
b/net/ipv6/xfrm6_tunnel.c +index 48bb1e3..5980e6e 100644 +--- a/net/ipv6/xfrm6_tunnel.c ++++ b/net/ipv6/xfrm6_tunnel.c +@@ -258,7 +258,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb) + __be32 spi; + + spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); +- return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; ++ return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi); + } + + static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, +diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c +index 811984d..11f59b7 100644 +--- a/net/irda/ircomm/ircomm_tty.c ++++ b/net/irda/ircomm/ircomm_tty.c +@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + add_wait_queue(&self->open_wait, &wait); + + IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count ); ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) ); + + /* As far as I can see, we protect open_count - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = 1; +- self->open_count--; ++ local_dec(&self->open_count); + } + spin_unlock_irqrestore(&self->spinlock, flags); +- self->blocked_open++; ++ local_inc(&self->blocked_open); + + while (1) { + if (tty->termios->c_cflag & CBAUD) { +@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + } + + IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count ); ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) ); + + schedule(); + } +@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + if (extra_count) { + /* ++ is not atomic, so this should be protected - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); +- self->open_count++; ++ local_inc(&self->open_count); + spin_unlock_irqrestore(&self->spinlock, flags); + } +- self->blocked_open--; ++ local_dec(&self->blocked_open); + + IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count); ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count)); + + if (!retval) + self->flags |= ASYNC_NORMAL_ACTIVE; +@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) + } + /* ++ is not atomic, so this should be protected - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); +- self->open_count++; ++ local_inc(&self->open_count); + + tty->driver_data = self; + self->tty = tty; + spin_unlock_irqrestore(&self->spinlock, flags); + + IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, +- self->line, self->open_count); ++ self->line, local_read(&self->open_count)); + + /* Not really used by us, but lets do it anyway */ + self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0; +@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) + return; + } + +- if ((tty->count == 1) && (self->open_count != 1)) { ++ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) { + /* + * Uh, oh. tty->count is 1, which means that the tty + * structure will be freed. 
state->count should always +@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) + */ + IRDA_DEBUG(0, "%s(), bad serial port count; " + "tty->count is 1, state->count is %d\n", __func__ , +- self->open_count); +- self->open_count = 1; ++ local_read(&self->open_count)); ++ local_set(&self->open_count, 1); + } + +- if (--self->open_count < 0) { ++ if (local_dec_return(&self->open_count) < 0) { + IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", +- __func__, self->line, self->open_count); +- self->open_count = 0; ++ __func__, self->line, local_read(&self->open_count)); ++ local_set(&self->open_count, 0); + } +- if (self->open_count) { ++ if (local_read(&self->open_count)) { + spin_unlock_irqrestore(&self->spinlock, flags); + + IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); +@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) + tty->closing = 0; + self->tty = NULL; + +- if (self->blocked_open) { ++ if (local_read(&self->blocked_open)) { + if (self->close_delay) + schedule_timeout_interruptible(self->close_delay); + wake_up_interruptible(&self->open_wait); +@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) + spin_lock_irqsave(&self->spinlock, flags); + self->flags &= ~ASYNC_NORMAL_ACTIVE; + self->tty = NULL; +- self->open_count = 0; ++ local_set(&self->open_count, 0); + spin_unlock_irqrestore(&self->spinlock, flags); + + wake_up_interruptible(&self->open_wait); +@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) + seq_putc(m, '\n'); + + seq_printf(m, "Role: %s\n", self->client ? "client" : "server"); +- seq_printf(m, "Open count: %d\n", self->open_count); ++ seq_printf(m, "Open count: %d\n", local_read(&self->open_count)); + seq_printf(m, "Max data size: %d\n", self->max_data_size); + seq_printf(m, "Max header size: %d\n", self->max_header_size); + +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c +index bada1b9..f325943 100644 +--- a/net/iucv/af_iucv.c ++++ b/net/iucv/af_iucv.c +@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk) + + write_lock_bh(&iucv_sk_list.lock); + +- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); ++ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); + while (__iucv_get_sock_by_name(name)) { + sprintf(name, "%08x", +- atomic_inc_return(&iucv_sk_list.autobind_name)); ++ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); + } + + write_unlock_bh(&iucv_sk_list.lock); +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 4e98193..439b449 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, + struct xfrm_migrate m[XFRM_MAX_DEPTH]; + struct xfrm_kmaddress k; + ++ pax_track_stack(); ++ + if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1], + ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) || + !ext_hdrs[SADB_X_EXT_POLICY - 1]) { +@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v) + seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); + else + seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + atomic_read(&s->sk_refcnt), + sk_rmem_alloc_get(s), + sk_wmem_alloc_get(s), +diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c +index bda96d1..c038b72 100644 +--- a/net/lapb/lapb_iface.c ++++ b/net/lapb/lapb_iface.c +@@ -157,7 +157,7 @@ int 
lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks + goto out; + + lapb->dev = dev; +- lapb->callbacks = *callbacks; ++ lapb->callbacks = callbacks; + + __lapb_insert_cb(lapb); + +@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb) + + void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) + { +- if (lapb->callbacks.connect_confirmation) +- lapb->callbacks.connect_confirmation(lapb->dev, reason); ++ if (lapb->callbacks->connect_confirmation) ++ lapb->callbacks->connect_confirmation(lapb->dev, reason); + } + + void lapb_connect_indication(struct lapb_cb *lapb, int reason) + { +- if (lapb->callbacks.connect_indication) +- lapb->callbacks.connect_indication(lapb->dev, reason); ++ if (lapb->callbacks->connect_indication) ++ lapb->callbacks->connect_indication(lapb->dev, reason); + } + + void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason) + { +- if (lapb->callbacks.disconnect_confirmation) +- lapb->callbacks.disconnect_confirmation(lapb->dev, reason); ++ if (lapb->callbacks->disconnect_confirmation) ++ lapb->callbacks->disconnect_confirmation(lapb->dev, reason); + } + + void lapb_disconnect_indication(struct lapb_cb *lapb, int reason) + { +- if (lapb->callbacks.disconnect_indication) +- lapb->callbacks.disconnect_indication(lapb->dev, reason); ++ if (lapb->callbacks->disconnect_indication) ++ lapb->callbacks->disconnect_indication(lapb->dev, reason); + } + + int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) + { +- if (lapb->callbacks.data_indication) +- return lapb->callbacks.data_indication(lapb->dev, skb); ++ if (lapb->callbacks->data_indication) ++ return lapb->callbacks->data_indication(lapb->dev, skb); + + kfree_skb(skb); + return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ +@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) + { + int used = 0; + +- if (lapb->callbacks.data_transmit) { +- lapb->callbacks.data_transmit(lapb->dev, skb); ++ if (lapb->callbacks->data_transmit) { ++ lapb->callbacks->data_transmit(lapb->dev, skb); + used = 1; + } + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index fe2d3f8..e57f683 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, + return err; + } + +-struct cfg80211_ops mac80211_config_ops = { ++const struct cfg80211_ops mac80211_config_ops = { + .add_virtual_intf = ieee80211_add_iface, + .del_virtual_intf = ieee80211_del_iface, + .change_virtual_intf = ieee80211_change_iface, +diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h +index 7d7879f..2d51f62 100644 +--- a/net/mac80211/cfg.h ++++ b/net/mac80211/cfg.h +@@ -4,6 +4,6 @@ + #ifndef __CFG_H + #define __CFG_H + +-extern struct cfg80211_ops mac80211_config_ops; ++extern const struct cfg80211_ops mac80211_config_ops; + + #endif /* __CFG_H */ +diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c +index 99c7525..9cb4937 100644 +--- a/net/mac80211/debugfs_key.c ++++ b/net/mac80211/debugfs_key.c +@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) + { + struct ieee80211_key *key = file->private_data; +- int i, res, bufsize = 2 * key->conf.keylen + 2; ++ int i, bufsize = 2 * key->conf.keylen + 2; + char *buf = kmalloc(bufsize, GFP_KERNEL); + char *p = buf; ++ ssize_t res; ++ ++ if (buf == NULL) ++ return -ENOMEM; + + for (i = 0; i < key->conf.keylen; i++) + p += scnprintf(p, 
bufsize + buf - p, "%02x", key->conf.key[i]); +diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c +index 33a2e89..08650c8 100644 +--- a/net/mac80211/debugfs_sta.c ++++ b/net/mac80211/debugfs_sta.c +@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, + int i; + struct sta_info *sta = file->private_data; + ++ pax_track_stack(); ++ + spin_lock_bh(&sta->lock); + p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n", + sta->ampdu_mlme.dialog_token_allocator + 1); +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index ca62bfe..6657a03 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -25,6 +25,7 @@ + #include <linux/etherdevice.h> + #include <net/cfg80211.h> + #include <net/mac80211.h> ++#include <asm/local.h> + #include "key.h" + #include "sta_info.h" + +@@ -635,7 +636,7 @@ struct ieee80211_local { + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + +- int open_count; ++ local_t open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll; +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index 079c500..eb3c6d4 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev) + break; + } + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + res = drv_start(local); + if (res) + goto err_del_bss; +@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev) + * Validate the MAC address for this device. + */ + if (!is_valid_ether_addr(dev->dev_addr)) { +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + drv_stop(local); + return -EADDRNOTAVAIL; + } +@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev) + + hw_reconf_flags |= __ieee80211_recalc_idle(local); + +- local->open_count++; ++ local_inc(&local->open_count); + if (hw_reconf_flags) { + ieee80211_hw_config(local, hw_reconf_flags); + /* +@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev) + err_del_interface: + drv_remove_interface(local, &conf); + err_stop: +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; +@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev) + WARN_ON(!list_empty(&sdata->u.ap.vlans)); + } + +- local->open_count--; ++ local_dec(&local->open_count); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: +@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev) + + ieee80211_recalc_ps(local, -1); + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + ieee80211_clear_tx_pending(local); + ieee80211_stop_device(local); + +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index 2dfe176..74e4388 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) + local->hw.conf.power_level = power; + } + +- if (changed && local->open_count) { ++ if (changed && local_read(&local->open_count)) { + ret = drv_config(local, changed); + /* + * Goal: +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index e67eea7..fcc227e 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct 
ieee80211_sub_if_data *sdata, + bool have_higher_than_11mbit = false, newsta = false; + u16 ap_ht_cap_flags; + ++ pax_track_stack(); ++ + /* + * AssocResp and ReassocResp have identical structure, so process both + * of them in this function. +diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c +index e535f1c..4d733d1 100644 +--- a/net/mac80211/pm.c ++++ b/net/mac80211/pm.c +@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) + } + + /* stop hardware - this must stop RX */ +- if (local->open_count) ++ if (local_read(&local->open_count)) + ieee80211_stop_device(local); + + local->suspended = true; +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index b33efc4..0a2efb6 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + struct rate_control_ref *ref, *old; + + ASSERT_RTNL(); +- if (local->open_count) ++ if (local_read(&local->open_count)) + return -EBUSY; + + ref = rate_control_alloc(name, local); +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index b1d7904..57e4da7 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, + return cpu_to_le16(dur); + } + +-static int inline is_ieee80211_device(struct ieee80211_local *local, ++static inline int is_ieee80211_device(struct ieee80211_local *local, + struct net_device *dev) + { + return local == wdev_priv(dev->ieee80211_ptr); +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index 31b1085..48fb26d 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) + local->resuming = true; + + /* restart hardware */ +- if (local->open_count) { ++ if (local_read(&local->open_count)) { + /* + * Upon resume hardware can sometimes be goofy due to + * various platform / driver / bus issues, so restarting +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig +index 634d14a..b35a608 100644 +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP + + To compile it as a module, choose M here. If unsure, say N. + ++config NETFILTER_XT_MATCH_GRADM ++ tristate '"gradm" match support' ++ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED ++ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC ++ ---help--- ++ The gradm match allows to match on grsecurity RBAC being enabled. ++ It is useful when iptables rules are applied early on bootup to ++ prevent connections to the machine (except from a trusted host) ++ while the RBAC system is disabled. 
++ + config NETFILTER_XT_MATCH_HASHLIMIT + tristate '"hashlimit" match support' + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) +diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile +index 49f62ee..a17b2c6 100644 +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o + obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o + obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o + obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o ++obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o +diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c +index 3c7e427..724043c 100644 +--- a/net/netfilter/ipvs/ip_vs_app.c ++++ b/net/netfilter/ipvs/ip_vs_app.c +@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = { + .open = ip_vs_app_open, + .read = seq_read, + .llseek = seq_lseek, +- .release = seq_release, ++ .release = seq_release_net, + }; + #endif + +diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c +index 95682e5..457dbac 100644 +--- a/net/netfilter/ipvs/ip_vs_conn.c ++++ b/net/netfilter/ipvs/ip_vs_conn.c +@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) + /* if the connection is not template and is created + * by sync, preserve the activity flag. + */ +- cp->flags |= atomic_read(&dest->conn_flags) & ++ cp->flags |= atomic_read_unchecked(&dest->conn_flags) & + (~IP_VS_CONN_F_INACTIVE); + else +- cp->flags |= atomic_read(&dest->conn_flags); ++ cp->flags |= atomic_read_unchecked(&dest->conn_flags); + cp->dest = dest; + + IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d " +@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport, + atomic_set(&cp->refcnt, 1); + + atomic_set(&cp->n_control, 0); +- atomic_set(&cp->in_pkts, 0); ++ atomic_set_unchecked(&cp->in_pkts, 0); + + atomic_inc(&ip_vs_conn_count); + if (flags & IP_VS_CONN_F_NO_CPORT) +@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = { + .open = ip_vs_conn_open, + .read = seq_read, + .llseek = seq_lseek, +- .release = seq_release, ++ .release = seq_release_net, + }; + + static const char *ip_vs_origin_name(unsigned flags) +@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = { + .open = ip_vs_conn_sync_open, + .read = seq_read, + .llseek = seq_lseek, +- .release = seq_release, ++ .release = seq_release_net, + }; + + #endif +@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp) + + /* Don't drop the entry if its number of incoming packets is not + located in [0, 8] */ +- i = atomic_read(&cp->in_pkts); ++ i = atomic_read_unchecked(&cp->in_pkts); + if (i > 8 || i < 0) return 0; + + if (!todrop_rate[i]) return 0; +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c +index b95699f..5fee919 100644 +--- a/net/netfilter/ipvs/ip_vs_core.c ++++ b/net/netfilter/ipvs/ip_vs_core.c +@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, + ret = cp->packet_xmit(skb, cp, pp); + /* do not touch skb anymore */ + +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + ip_vs_conn_put(cp); + return ret; + } +@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, + * Sync connection if it is about to close to + * 
encorage the standby servers to update the connections timeout + */ +- pkts = atomic_add_return(1, &cp->in_pkts); ++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts); + if (af == AF_INET && + (ip_vs_sync_state & IP_VS_STATE_MASTER) && + (((cp->protocol != IPPROTO_TCP || +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index 02b2610..2d89424 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, + ip_vs_rs_hash(dest); + write_unlock_bh(&__ip_vs_rs_lock); + } +- atomic_set(&dest->conn_flags, conn_flags); ++ atomic_set_unchecked(&dest->conn_flags, conn_flags); + + /* bind the service */ + if (!dest->svc) { +@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) + " %-7s %-6d %-10d %-10d\n", + &dest->addr.in6, + ntohs(dest->port), +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), + atomic_read(&dest->weight), + atomic_read(&dest->activeconns), + atomic_read(&dest->inactconns)); +@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) + "%-7s %-6d %-10d %-10d\n", + ntohl(dest->addr.ip), + ntohs(dest->port), +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), + atomic_read(&dest->weight), + atomic_read(&dest->activeconns), + atomic_read(&dest->inactconns)); +@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = { + .open = ip_vs_info_open, + .read = seq_read, + .llseek = seq_lseek, +- .release = seq_release_private, ++ .release = seq_release_net, + }; + + #endif +@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = { + .open = ip_vs_stats_seq_open, + .read = seq_read, + .llseek = seq_lseek, +- .release = single_release, ++ .release = single_release_net, + }; + + #endif +@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get, + + entry.addr = dest->addr.ip; + entry.port = dest->port; +- entry.conn_flags = atomic_read(&dest->conn_flags); ++ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags); + entry.weight = atomic_read(&dest->weight); + entry.u_threshold = dest->u_threshold; + entry.l_threshold = dest->l_threshold; +@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + unsigned char arg[128]; + int ret = 0; + ++ pax_track_stack(); ++ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + +@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) + NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); + + NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, +- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); ++ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); + NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); + NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); + NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c +index e177f0d..55e8581 100644 +--- a/net/netfilter/ipvs/ip_vs_sync.c ++++ b/net/netfilter/ipvs/ip_vs_sync.c +@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) + + if (opt) + memcpy(&cp->in_seq, opt, sizeof(*opt)); +- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); ++ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); + 
cp->state = state; + cp->old_state = cp->state; + /* +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index 30b3189..e2e4b55 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + else + rc = NF_ACCEPT; + /* do not touch skb anymore */ +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + goto out; + } + +@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + else + rc = NF_ACCEPT; + /* do not touch skb anymore */ +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + goto out; + } + +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c +index d521718..d0fd7a1 100644 +--- a/net/netfilter/nf_conntrack_netlink.c ++++ b/net/netfilter/nf_conntrack_netlink.c +@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr, + static int + ctnetlink_parse_tuple(const struct nlattr * const cda[], + struct nf_conntrack_tuple *tuple, +- enum ctattr_tuple type, u_int8_t l3num) ++ enum ctattr_type type, u_int8_t l3num) + { + struct nlattr *tb[CTA_TUPLE_MAX+1]; + int err; +diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c +index f900dc3..5e45346 100644 +--- a/net/netfilter/nfnetlink_log.c ++++ b/net/netfilter/nfnetlink_log.c +@@ -68,7 +68,7 @@ struct nfulnl_instance { + }; + + static DEFINE_RWLOCK(instances_lock); +-static atomic_t global_seq; ++static atomic_unchecked_t global_seq; + + #define INSTANCE_BUCKETS 16 + static struct hlist_head instance_table[INSTANCE_BUCKETS]; +@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst, + /* global sequence number */ + if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) + NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, +- htonl(atomic_inc_return(&global_seq))); ++ htonl(atomic_inc_return_unchecked(&global_seq))); + + if (data_len) { + struct nlattr *nla; +diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c +new file mode 100644 +index 0000000..b1bac76 +--- /dev/null ++++ b/net/netfilter/xt_gradm.c +@@ -0,0 +1,51 @@ ++/* ++ * gradm match for netfilter ++ * Copyright © Zbigniew Krzystolik, 2010 ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License; either version ++ * 2 or 3 as published by the Free Software Foundation. 
++ */ ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/skbuff.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/grsecurity.h> ++#include <linux/netfilter/xt_gradm.h> ++ ++static bool ++gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par) ++{ ++ const struct xt_gradm_mtinfo *info = par->matchinfo; ++ bool retval = false; ++ if (gr_acl_is_enabled()) ++ retval = true; ++ return retval ^ info->invflags; ++} ++ ++static struct xt_match gradm_mt_reg __read_mostly = { ++ .name = "gradm", ++ .revision = 0, ++ .family = NFPROTO_UNSPEC, ++ .match = gradm_mt, ++ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)), ++ .me = THIS_MODULE, ++}; ++ ++static int __init gradm_mt_init(void) ++{ ++ return xt_register_match(&gradm_mt_reg); ++} ++ ++static void __exit gradm_mt_exit(void) ++{ ++ xt_unregister_match(&gradm_mt_reg); ++} ++ ++module_init(gradm_mt_init); ++module_exit(gradm_mt_exit); ++MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>"); ++MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("ipt_gradm"); ++MODULE_ALIAS("ip6t_gradm"); +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 5a7dcdf..24a3578 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk) + sk->sk_error_report(sk); + } + } +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + } + + static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) +@@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v) + struct netlink_sock *nlk = nlk_sk(s); + + seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + s->sk_protocol, + nlk->pid, + nlk->groups ? 
(u32)nlk->groups[0] : 0, + sk_rmem_alloc_get(s), + sk_wmem_alloc_get(s), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + nlk->cb, ++#endif + atomic_read(&s->sk_refcnt), +- atomic_read(&s->sk_drops) ++ atomic_read_unchecked(&s->sk_drops) + ); + + } +diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c +index 7a83495..ab0062f 100644 +--- a/net/netrom/af_netrom.c ++++ b/net/netrom/af_netrom.c +@@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr, + struct sock *sk = sock->sk; + struct nr_sock *nr = nr_sk(sk); + ++ memset(sax, 0, sizeof(*sax)); + lock_sock(sk); + if (peer != 0) { + if (sk->sk_state != TCP_ESTABLISHED) { +@@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr, + *uaddr_len = sizeof(struct full_sockaddr_ax25); + } else { + sax->fsa_ax25.sax25_family = AF_NETROM; +- sax->fsa_ax25.sax25_ndigis = 0; + sax->fsa_ax25.sax25_call = nr->source_addr; + *uaddr_len = sizeof(struct sockaddr_ax25); + } +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 35cfa79..4e78ff7 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v) + + seq_printf(seq, + "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + atomic_read(&s->sk_refcnt), + s->sk_type, + ntohs(po->num), +diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c +index 519ff9d..a422a90 100644 +--- a/net/phonet/af_phonet.c ++++ b/net/phonet/af_phonet.c +@@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol) + { + struct phonet_protocol *pp; + +- if (protocol >= PHONET_NPROTO) ++ if (protocol < 0 || protocol >= PHONET_NPROTO) + return NULL; + + spin_lock(&proto_tab_lock); +@@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol, + { + int err = 0; + +- if (protocol >= PHONET_NPROTO) ++ if (protocol < 0 || protocol >= PHONET_NPROTO) + return -EINVAL; + + err = proto_register(pp->prot, 1); +diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c +index ef5c75c..2b6c2fa 100644 +--- a/net/phonet/datagram.c ++++ b/net/phonet/datagram.c +@@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) + if (err < 0) { + kfree_skb(skb); + if (err == -ENOMEM) +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + } + return err ? 
NET_RX_DROP : NET_RX_SUCCESS; + } +diff --git a/net/phonet/pep.c b/net/phonet/pep.c +index 9cdd35e..16cd850 100644 +--- a/net/phonet/pep.c ++++ b/net/phonet/pep.c +@@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) + + case PNS_PEP_CTRL_REQ: + if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + break; + } + __skb_pull(skb, 4); +@@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) + if (!err) + return 0; + if (err == -ENOMEM) +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + break; + } + + if (pn->rx_credits == 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + err = -ENOBUFS; + break; + } +diff --git a/net/phonet/socket.c b/net/phonet/socket.c +index aa5b5a9..c09b4f8 100644 +--- a/net/phonet/socket.c ++++ b/net/phonet/socket.c +@@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v) + sk->sk_state, + sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), + sock_i_uid(sk), sock_i_ino(sk), +- atomic_read(&sk->sk_refcnt), sk, +- atomic_read(&sk->sk_drops), &len); ++ atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sk, ++#endif ++ atomic_read_unchecked(&sk->sk_drops), &len); + } + seq_printf(seq, "%*s\n", 127 - len, ""); + return 0; +diff --git a/net/rds/Kconfig b/net/rds/Kconfig +index ec753b3..821187c 100644 +--- a/net/rds/Kconfig ++++ b/net/rds/Kconfig +@@ -1,7 +1,7 @@ + + config RDS + tristate "The RDS Protocol (EXPERIMENTAL)" +- depends on INET && EXPERIMENTAL ++ depends on INET && EXPERIMENTAL && BROKEN + ---help--- + The RDS (Reliable Datagram Sockets) protocol provides reliable, + sequenced delivery of datagrams over Infiniband, iWARP, +diff --git a/net/rds/cong.c b/net/rds/cong.c +index dd2711d..1c7ed12 100644 +--- a/net/rds/cong.c ++++ b/net/rds/cong.c +@@ -77,7 +77,7 @@ + * finds that the saved generation number is smaller than the global generation + * number, it wakes up the process. 
+ */ +-static atomic_t rds_cong_generation = ATOMIC_INIT(0); ++static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0); + + /* + * Congestion monitoring +@@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask) + rdsdebug("waking map %p for %pI4\n", + map, &map->m_addr); + rds_stats_inc(s_cong_update_received); +- atomic_inc(&rds_cong_generation); ++ atomic_inc_unchecked(&rds_cong_generation); + if (waitqueue_active(&map->m_waitq)) + wake_up(&map->m_waitq); + if (waitqueue_active(&rds_poll_waitq)) +@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated); + + int rds_cong_updated_since(unsigned long *recent) + { +- unsigned long gen = atomic_read(&rds_cong_generation); ++ unsigned long gen = atomic_read_unchecked(&rds_cong_generation); + + if (likely(*recent == gen)) + return 0; +diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c +index de4a1b1..94ec861 100644 +--- a/net/rds/iw_rdma.c ++++ b/net/rds/iw_rdma.c +@@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i + struct rdma_cm_id *pcm_id; + int rc; + ++ pax_track_stack(); ++ + src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; + dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; + +diff --git a/net/rds/tcp.c b/net/rds/tcp.c +index b5198ae..8b9fb90 100644 +--- a/net/rds/tcp.c ++++ b/net/rds/tcp.c +@@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock) + int val = 1; + + set_fs(KERNEL_DS); +- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val, ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val, + sizeof(val)); + set_fs(oldfs); + } +diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c +index ab545e0..4079b3b 100644 +--- a/net/rds/tcp_send.c ++++ b/net/rds/tcp_send.c +@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val) + + oldfs = get_fs(); + set_fs(KERNEL_DS); +- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val, ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val, + sizeof(val)); + set_fs(oldfs); + } +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c +index a86afce..8657bce 100644 +--- a/net/rxrpc/af_rxrpc.c ++++ b/net/rxrpc/af_rxrpc.c +@@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops; + __be32 rxrpc_epoch; + + /* current debugging ID */ +-atomic_t rxrpc_debug_id; ++atomic_unchecked_t rxrpc_debug_id; + + /* count of skbs currently in use */ + atomic_t rxrpc_n_skbs; +diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c +index b4a2209..539106c 100644 +--- a/net/rxrpc/ar-ack.c ++++ b/net/rxrpc/ar-ack.c +@@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call) + + _enter("{%d,%d,%d,%d},", + call->acks_hard, call->acks_unacked, +- atomic_read(&call->sequence), ++ atomic_read_unchecked(&call->sequence), + CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); + + stop = 0; +@@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call) + + /* each Tx packet has a new serial number */ + sp->hdr.serial = +- htonl(atomic_inc_return(&call->conn->serial)); ++ htonl(atomic_inc_return_unchecked(&call->conn->serial)); + + hdr = (struct rxrpc_header *) txb->head; + hdr->serial = sp->hdr.serial; +@@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) + */ + static void rxrpc_clear_tx_window(struct rxrpc_call *call) + { +- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence)); ++ rxrpc_rotate_tx_window(call, 
atomic_read_unchecked(&call->sequence)); + } + + /* +@@ -627,7 +627,7 @@ process_further: + + latest = ntohl(sp->hdr.serial); + hard = ntohl(ack.firstPacket); +- tx = atomic_read(&call->sequence); ++ tx = atomic_read_unchecked(&call->sequence); + + _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + latest, +@@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work) + u32 abort_code = RX_PROTOCOL_ERROR; + u8 *acks = NULL; + ++ pax_track_stack(); ++ + //printk("\n--------------------\n"); + _enter("{%d,%s,%lx} [%lu]", + call->debug_id, rxrpc_call_states[call->state], call->events, +@@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work) + goto maybe_reschedule; + + send_ACK_with_skew: +- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - ++ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) - + ntohl(ack.serial)); + send_ACK: + mtu = call->conn->trans->peer->if_mtu; +@@ -1171,7 +1173,7 @@ send_ACK: + ackinfo.rxMTU = htonl(5692); + ackinfo.jumbo_max = htonl(4); + +- hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); + _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + ntohl(hdr.serial), + ntohs(ack.maxSkew), +@@ -1189,7 +1191,7 @@ send_ACK: + send_message: + _debug("send message"); + +- hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); + _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial)); + send_message_2: + +diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c +index bc0019f..e1b4b24 100644 +--- a/net/rxrpc/ar-call.c ++++ b/net/rxrpc/ar-call.c +@@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) + spin_lock_init(&call->lock); + rwlock_init(&call->state_lock); + atomic_set(&call->usage, 1); +- call->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; + + memset(&call->sock_node, 0xed, sizeof(call->sock_node)); +diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c +index 9f1ce84..ff8d061 100644 +--- a/net/rxrpc/ar-connection.c ++++ b/net/rxrpc/ar-connection.c +@@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) + rwlock_init(&conn->lock); + spin_lock_init(&conn->state_lock); + atomic_set(&conn->usage, 1); +- conn->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + conn->avail_calls = RXRPC_MAXCALLS; + conn->size_align = 4; + conn->header_size = sizeof(struct rxrpc_header); +diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c +index 0505cdc..f0748ce 100644 +--- a/net/rxrpc/ar-connevent.c ++++ b/net/rxrpc/ar-connevent.c +@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, + + len = iov[0].iov_len + iov[1].iov_len; + +- hdr.serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); +diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c +index f98c802..9e8488e 100644 +--- a/net/rxrpc/ar-input.c ++++ b/net/rxrpc/ar-input.c +@@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) + /* track the latest serial number on this 
connection for ACK packet + * information */ + serial = ntohl(sp->hdr.serial); +- hi_serial = atomic_read(&call->conn->hi_serial); ++ hi_serial = atomic_read_unchecked(&call->conn->hi_serial); + while (serial > hi_serial) +- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, ++ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial, + serial); + + /* request ACK generation for any ACK or DATA packet that requests +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h +index 7043b29..06edcdf 100644 +--- a/net/rxrpc/ar-internal.h ++++ b/net/rxrpc/ar-internal.h +@@ -272,8 +272,8 @@ struct rxrpc_connection { + int error; /* error code for local abort */ + int debug_id; /* debug ID for printks */ + unsigned call_counter; /* call ID counter */ +- atomic_t serial; /* packet serial number counter */ +- atomic_t hi_serial; /* highest serial number received */ ++ atomic_unchecked_t serial; /* packet serial number counter */ ++ atomic_unchecked_t hi_serial; /* highest serial number received */ + u8 avail_calls; /* number of calls available */ + u8 size_align; /* data size alignment (for security) */ + u8 header_size; /* rxrpc + security header size */ +@@ -346,7 +346,7 @@ struct rxrpc_call { + spinlock_t lock; + rwlock_t state_lock; /* lock for state transition */ + atomic_t usage; +- atomic_t sequence; /* Tx data packet sequence counter */ ++ atomic_unchecked_t sequence; /* Tx data packet sequence counter */ + u32 abort_code; /* local/remote abort code */ + enum { /* current state of call */ + RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ +@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) + */ + extern atomic_t rxrpc_n_skbs; + extern __be32 rxrpc_epoch; +-extern atomic_t rxrpc_debug_id; ++extern atomic_unchecked_t rxrpc_debug_id; + extern struct workqueue_struct *rxrpc_workqueue; + + /* +diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c +index 74697b2..10f9b77 100644 +--- a/net/rxrpc/ar-key.c ++++ b/net/rxrpc/ar-key.c +@@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, + return ret; + + plen -= sizeof(*token); +- token = kmalloc(sizeof(*token), GFP_KERNEL); ++ token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + return -ENOMEM; + +- token->kad = kmalloc(plen, GFP_KERNEL); ++ token->kad = kzalloc(plen, GFP_KERNEL); + if (!token->kad) { + kfree(token); + return -ENOMEM; +@@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) + goto error; + + ret = -ENOMEM; +- token = kmalloc(sizeof(*token), GFP_KERNEL); ++ token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + goto error; +- token->kad = kmalloc(plen, GFP_KERNEL); ++ token->kad = kzalloc(plen, GFP_KERNEL); + if (!token->kad) + goto error_free; + +diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c +index 807535f..5b7f19e 100644 +--- a/net/rxrpc/ar-local.c ++++ b/net/rxrpc/ar-local.c +@@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) + spin_lock_init(&local->lock); + rwlock_init(&local->services_lock); + atomic_set(&local->usage, 1); +- local->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + memcpy(&local->srx, srx, sizeof(*srx)); + } + +diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c +index cc9102c..7d3888e 100644 +--- a/net/rxrpc/ar-output.c ++++ b/net/rxrpc/ar-output.c +@@ -680,9 +680,9 @@ static int rxrpc_send_data(struct 
kiocb *iocb, + sp->hdr.cid = call->cid; + sp->hdr.callNumber = call->call_id; + sp->hdr.seq = +- htonl(atomic_inc_return(&call->sequence)); ++ htonl(atomic_inc_return_unchecked(&call->sequence)); + sp->hdr.serial = +- htonl(atomic_inc_return(&conn->serial)); ++ htonl(atomic_inc_return_unchecked(&conn->serial)); + sp->hdr.type = RXRPC_PACKET_TYPE_DATA; + sp->hdr.userStatus = 0; + sp->hdr.securityIndex = conn->security_ix; +diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c +index edc026c..4bd4e2d 100644 +--- a/net/rxrpc/ar-peer.c ++++ b/net/rxrpc/ar-peer.c +@@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx, + INIT_LIST_HEAD(&peer->error_targets); + spin_lock_init(&peer->lock); + atomic_set(&peer->usage, 1); +- peer->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + memcpy(&peer->srx, srx, sizeof(*srx)); + + rxrpc_assess_MTU_size(peer); +diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c +index 38047f7..9f48511 100644 +--- a/net/rxrpc/ar-proc.c ++++ b/net/rxrpc/ar-proc.c +@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) + atomic_read(&conn->usage), + rxrpc_conn_states[conn->state], + key_serial(conn->key), +- atomic_read(&conn->serial), +- atomic_read(&conn->hi_serial)); ++ atomic_read_unchecked(&conn->serial), ++ atomic_read_unchecked(&conn->hi_serial)); + + return 0; + } +diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c +index 0936e1a..437c640 100644 +--- a/net/rxrpc/ar-transport.c ++++ b/net/rxrpc/ar-transport.c +@@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, + spin_lock_init(&trans->client_lock); + rwlock_init(&trans->conn_lock); + atomic_set(&trans->usage, 1); +- trans->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + + if (peer->srx.transport.family == AF_INET) { + switch (peer->srx.transport_type) { +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c +index 713ac59..306f6ae 100644 +--- a/net/rxrpc/rxkad.c ++++ b/net/rxrpc/rxkad.c +@@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, + u16 check; + int nsg; + ++ pax_track_stack(); ++ + sp = rxrpc_skb(skb); + + _enter(""); +@@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call, + u16 check; + int nsg; + ++ pax_track_stack(); ++ + _enter(""); + + sp = rxrpc_skb(skb); +@@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) + + len = iov[0].iov_len + iov[1].iov_len; + +- hdr.serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); +@@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn, + + len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; + +- hdr->serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx RESPONSE %%%u", ntohl(hdr->serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); +diff --git a/net/sctp/auth.c b/net/sctp/auth.c +index 914c419..7a16d2c 100644 +--- a/net/sctp/auth.c ++++ b/net/sctp/auth.c +@@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) + struct sctp_auth_bytes *key; + + /* Verify that 
we are not going to overflow INT_MAX */ +- if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes)) ++ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes))) + return NULL; + + /* Allocate the shared key */ +diff --git a/net/sctp/proc.c b/net/sctp/proc.c +index d093cbf..9fc36fc 100644 +--- a/net/sctp/proc.c ++++ b/net/sctp/proc.c +@@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) + sctp_for_each_hentry(epb, node, &head->chain) { + ep = sctp_ep(epb); + sk = epb->sk; +- seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, ++ seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, NULL, ++#else ++ ep, sk, ++#endif + sctp_sk(sk)->type, sk->sk_state, hash, + epb->bind_addr.port, + sock_i_uid(sk), sock_i_ino(sk)); +@@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) + seq_printf(seq, + "%8p %8p %-3d %-3d %-2d %-4d " + "%4d %8d %8d %7d %5lu %-5d %5d ", +- assoc, sk, sctp_sk(sk)->type, sk->sk_state, ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, NULL, ++#else ++ assoc, sk, ++#endif ++ sctp_sk(sk)->type, sk->sk_state, + assoc->state, hash, + assoc->assoc_id, + assoc->sndbuf_used, +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 3a95fcb..c40fc1d 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -5802,7 +5802,6 @@ pp_found: + */ + int reuse = sk->sk_reuse; + struct sock *sk2; +- struct hlist_node *node; + + SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); + if (pp->fastreuse && sk->sk_reuse && +diff --git a/net/socket.c b/net/socket.c +index d449812..4ac08d3c 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -87,6 +87,7 @@ + #include <linux/wireless.h> + #include <linux/nsproxy.h> + #include <linux/magic.h> ++#include <linux/in.h> + + #include <asm/uaccess.h> + #include <asm/unistd.h> +@@ -97,6 +98,21 @@ + #include <net/sock.h> + #include <linux/netfilter.h> + ++extern void gr_attach_curr_ip(const struct sock *sk); ++extern int gr_handle_sock_all(const int family, const int type, ++ const int protocol); ++extern int gr_handle_sock_server(const struct sockaddr *sck); ++extern int gr_handle_sock_server_other(const struct sock *sck); ++extern int gr_handle_sock_client(const struct sockaddr *sck); ++extern int gr_search_connect(struct socket * sock, ++ struct sockaddr_in * addr); ++extern int gr_search_bind(struct socket * sock, ++ struct sockaddr_in * addr); ++extern int gr_search_listen(struct socket * sock); ++extern int gr_search_accept(struct socket * sock); ++extern int gr_search_socket(const int domain, const int type, ++ const int protocol); ++ + static int sock_no_open(struct inode *irrelevant, struct file *dontcare); + static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); +@@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type, + mnt); + } + +-static struct vfsmount *sock_mnt __read_mostly; ++struct vfsmount *sock_mnt __read_mostly; + + static struct file_system_type sock_fs_type = { + .name = "sockfs", +@@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol, + return -EAFNOSUPPORT; + if (type < 0 || type >= SOCK_MAX) + return -EINVAL; ++ if (protocol < 0) ++ return -EINVAL; + + /* Compatibility. 
+ +@@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) + if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) + flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; + ++ if(!gr_search_socket(family, type, protocol)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ if (gr_handle_sock_all(family, type, protocol)) { ++ retval = -EACCES; ++ goto out; ++ } ++ + retval = sock_create(family, type, protocol, &sock); + if (retval < 0) + goto out; +@@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) + if (sock) { + err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address); + if (err >= 0) { ++ if (gr_handle_sock_server((struct sockaddr *)&address)) { ++ err = -EACCES; ++ goto error; ++ } ++ err = gr_search_bind(sock, (struct sockaddr_in *)&address); ++ if (err) ++ goto error; ++ + err = security_socket_bind(sock, + (struct sockaddr *)&address, + addrlen); +@@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) + (struct sockaddr *) + &address, addrlen); + } ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) + if ((unsigned)backlog > somaxconn) + backlog = somaxconn; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ goto error; ++ } ++ ++ err = gr_search_listen(sock); ++ if (err) ++ goto error; ++ + err = security_socket_listen(sock, backlog); + if (!err) + err = sock->ops->listen(sock, backlog); + ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, + newsock->type = sock->type; + newsock->ops = sock->ops; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ sock_release(newsock); ++ goto out_put; ++ } ++ ++ err = gr_search_accept(sock); ++ if (err) { ++ sock_release(newsock); ++ goto out_put; ++ } ++ + /* + * We don't need try_module_get here, as the listening socket (sock) + * has the protocol module (sock->ops->owner) held. 
+@@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, + fd_install(newfd, newfile); + err = newfd; + ++ gr_attach_curr_ip(newsock->sk); ++ + out_put: + fput_light(sock->file, fput_needed); + out: +@@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, + int, addrlen) + { + struct socket *sock; ++ struct sockaddr *sck; + struct sockaddr_storage address; + int err, fput_needed; + +@@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, + if (err < 0) + goto out_put; + ++ sck = (struct sockaddr *)&address; ++ ++ if (gr_handle_sock_client(sck)) { ++ err = -EACCES; ++ goto out_put; ++ } ++ ++ err = gr_search_connect(sock, (struct sockaddr_in *)sck); ++ if (err) ++ goto out_put; ++ + err = + security_socket_connect(sock, (struct sockaddr *)&address, addrlen); + if (err) +@@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) + int err, ctl_len, iov_size, total_len; + int fput_needed; + ++ pax_track_stack(); ++ + err = -EFAULT; + if (MSG_CMSG_COMPAT & flags) { + if (get_compat_msghdr(&msg_sys, msg_compat)) +@@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, + * kernel msghdr to use the kernel address space) + */ + +- uaddr = (__force void __user *)msg_sys.msg_name; ++ uaddr = (void __force_user *)msg_sys.msg_name; + uaddr_len = COMPAT_NAMELEN(msg); + if (MSG_CMSG_COMPAT & flags) { + err = verify_compat_iovec(&msg_sys, iov, +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index ac94477..8afe5c3 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word) + #ifdef RPC_DEBUG + static void rpc_task_set_debuginfo(struct rpc_task *task) + { +- static atomic_t rpc_pid; ++ static atomic_unchecked_t rpc_pid; + + task->tk_magic = RPC_TASK_MAGIC_ID; +- task->tk_pid = atomic_inc_return(&rpc_pid); ++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid); + } + #else + static inline void rpc_task_set_debuginfo(struct rpc_task *task) +diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c +index 35fb68b..236a8bf 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma.c ++++ b/net/sunrpc/xprtrdma/svc_rdma.c +@@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE; + static unsigned int min_max_inline = 4096; + static unsigned int max_max_inline = 65536; + +-atomic_t rdma_stat_recv; +-atomic_t rdma_stat_read; +-atomic_t rdma_stat_write; +-atomic_t rdma_stat_sq_starve; +-atomic_t rdma_stat_rq_starve; +-atomic_t rdma_stat_rq_poll; +-atomic_t rdma_stat_rq_prod; +-atomic_t rdma_stat_sq_poll; +-atomic_t rdma_stat_sq_prod; ++atomic_unchecked_t rdma_stat_recv; ++atomic_unchecked_t rdma_stat_read; ++atomic_unchecked_t rdma_stat_write; ++atomic_unchecked_t rdma_stat_sq_starve; ++atomic_unchecked_t rdma_stat_rq_starve; ++atomic_unchecked_t rdma_stat_rq_poll; ++atomic_unchecked_t rdma_stat_rq_prod; ++atomic_unchecked_t rdma_stat_sq_poll; ++atomic_unchecked_t rdma_stat_sq_prod; + + /* Temporary NFS request map and context caches */ + struct kmem_cache *svc_rdma_map_cachep; +@@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write, + len -= *ppos; + if (len > *lenp) + len = *lenp; +- if (len && copy_to_user(buffer, str_buf, len)) ++ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len))) + return -EFAULT; + *lenp = len; + *ppos += len; +@@ -149,63 +149,63 @@ static ctl_table 
svcrdma_parm_table[] = { + { + .procname = "rdma_stat_read", + .data = &rdma_stat_read, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_recv", + .data = &rdma_stat_recv, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_write", + .data = &rdma_stat_write, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_sq_starve", + .data = &rdma_stat_sq_starve, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_rq_starve", + .data = &rdma_stat_rq_starve, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_rq_poll", + .data = &rdma_stat_rq_poll, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_rq_prod", + .data = &rdma_stat_rq_prod, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_sq_poll", + .data = &rdma_stat_sq_poll, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, + { + .procname = "rdma_stat_sq_prod", + .data = &rdma_stat_sq_prod, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = &read_reset_stat, + }, +diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +index 9e88438..8ed5cf0 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +@@ -495,7 +495,7 @@ next_sge: + svc_rdma_put_context(ctxt, 0); + goto out; + } +- atomic_inc(&rdma_stat_read); ++ atomic_inc_unchecked(&rdma_stat_read); + + if (read_wr.num_sge < chl_map->ch[ch_no].count) { + chl_map->ch[ch_no].count -= read_wr.num_sge; +@@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) + dto_q); + list_del_init(&ctxt->dto_q); + } else { +- atomic_inc(&rdma_stat_rq_starve); ++ atomic_inc_unchecked(&rdma_stat_rq_starve); + clear_bit(XPT_DATA, &xprt->xpt_flags); + ctxt = NULL; + } +@@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) + dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", + ctxt, rdma_xprt, rqstp, ctxt->wc_status); + BUG_ON(ctxt->wc_status != IB_WC_SUCCESS); +- atomic_inc(&rdma_stat_recv); ++ atomic_inc_unchecked(&rdma_stat_recv); + + /* Build up the XDR from the receive buffers. 
*/ + rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); +diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c +index f11be72..7aad4e8 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c +@@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, + write_wr.wr.rdma.remote_addr = to; + + /* Post It */ +- atomic_inc(&rdma_stat_write); ++ atomic_inc_unchecked(&rdma_stat_write); + if (svc_rdma_send(xprt, &write_wr)) + goto err; + return 0; +diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c +index 3fa5751..030ba89 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c +@@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) + return; + + ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); +- atomic_inc(&rdma_stat_rq_poll); ++ atomic_inc_unchecked(&rdma_stat_rq_poll); + + while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { + ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; +@@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) + } + + if (ctxt) +- atomic_inc(&rdma_stat_rq_prod); ++ atomic_inc_unchecked(&rdma_stat_rq_prod); + + set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); + /* +@@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) + return; + + ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); +- atomic_inc(&rdma_stat_sq_poll); ++ atomic_inc_unchecked(&rdma_stat_sq_poll); + while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { + if (wc.status != IB_WC_SUCCESS) + /* Close the transport */ +@@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) + } + + if (ctxt) +- atomic_inc(&rdma_stat_sq_prod); ++ atomic_inc_unchecked(&rdma_stat_sq_prod); + } + + static void sq_comp_handler(struct ib_cq *cq, void *cq_context) +@@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) + spin_lock_bh(&xprt->sc_lock); + if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { + spin_unlock_bh(&xprt->sc_lock); +- atomic_inc(&rdma_stat_sq_starve); ++ atomic_inc_unchecked(&rdma_stat_sq_starve); + + /* See if we can opportunistically reap SQ WR to make room */ + sq_cq_reap(xprt); +diff --git a/net/sysctl_net.c b/net/sysctl_net.c +index 0b15d72..7934fbb 100644 +--- a/net/sysctl_net.c ++++ b/net/sysctl_net.c +@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root, + struct ctl_table *table) + { + /* Allow network administrator to have same access as root. 
*/ +- if (capable(CAP_NET_ADMIN)) { ++ if (capable_nolog(CAP_NET_ADMIN)) { + int mode = (table->mode >> 6) & 7; + return (mode << 6) | (mode << 3) | mode; + } +diff --git a/net/tipc/link.c b/net/tipc/link.c +index dd4c18b..f40d38d 100644 +--- a/net/tipc/link.c ++++ b/net/tipc/link.c +@@ -1418,7 +1418,7 @@ again: + + if (!sect_rest) { + sect_rest = msg_sect[++curr_sect].iov_len; +- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base; ++ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base; + } + + if (sect_rest < fragm_rest) +@@ -1437,7 +1437,7 @@ error: + } + } else + skb_copy_to_linear_data_offset(buf, fragm_crs, +- sect_crs, sz); ++ (const void __force_kernel *)sect_crs, sz); + sect_crs += sz; + sect_rest -= sz; + fragm_crs += sz; +diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c +index 0747d8a..e8bf3f3 100644 +--- a/net/tipc/subscr.c ++++ b/net/tipc/subscr.c +@@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub, + { + struct iovec msg_sect; + +- msg_sect.iov_base = (void *)&sub->evt; ++ msg_sect.iov_base = (void __force_user *)&sub->evt; + msg_sect.iov_len = sizeof(struct tipc_event); + + sub->evt.event = htohl(event, sub->swap); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index db8d51a..608692d 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net, + err = -ECONNREFUSED; + if (!S_ISSOCK(inode->i_mode)) + goto put_fail; ++ ++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) { ++ err = -EACCES; ++ goto put_fail; ++ } ++ + u = unix_find_socket_byinode(net, inode); + if (!u) + goto put_fail; +@@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net, + if (u) { + struct dentry *dentry; + dentry = unix_sk(u)->dentry; ++ ++ if (!gr_handle_chroot_unix(u->sk_peercred.pid)) { ++ err = -EPERM; ++ sock_put(u); ++ goto fail; ++ } ++ + if (dentry) + touch_atime(unix_sk(u)->mnt, dentry); + } else +@@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + err = security_path_mknod(&nd.path, dentry, mode, 0); + if (err) + goto out_mknod_drop_write; ++ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) { ++ err = -EACCES; ++ goto out_mknod_drop_write; ++ } + err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); + out_mknod_drop_write: + mnt_drop_write(nd.path.mnt); + if (err) + goto out_mknod_dput; ++ ++ gr_handle_create(dentry, nd.path.mnt); ++ + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + dput(nd.path.dentry); + nd.path.dentry = dentry; +@@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v) + unix_state_lock(s); + + seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + atomic_read(&s->sk_refcnt), + 0, + s->sk_state == TCP_LISTEN ? 
__SO_ACCEPTCON : 0, +diff --git a/net/wireless/core.h b/net/wireless/core.h +index 376798f..109a61f 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -27,7 +27,7 @@ struct cfg80211_registered_device { + struct mutex mtx; + + /* rfkill support */ +- struct rfkill_ops rfkill_ops; ++ rfkill_ops_no_const rfkill_ops; + struct rfkill *rfkill; + struct work_struct rfkill_sync; + +diff --git a/net/wireless/wext.c b/net/wireless/wext.c +index a2e4c60..0979cbe 100644 +--- a/net/wireless/wext.c ++++ b/net/wireless/wext.c +@@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + */ + + /* Support for very large requests */ +- if ((descr->flags & IW_DESCR_FLAG_NOMAX) && +- (user_length > descr->max_tokens)) { ++ if (user_length > descr->max_tokens) { + /* Allow userspace to GET more than max so + * we can support any size GET requests. + * There is still a limit : -ENOMEM. +@@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + } + } + +- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { +- /* +- * If this is a GET, but not NOMAX, it means that the extra +- * data is not bounded by userspace, but by max_tokens. Thus +- * set the length to max_tokens. This matches the extra data +- * allocation. +- * The driver should fill it with the number of tokens it +- * provided, and it may check iwp->length rather than having +- * knowledge of max_tokens. If the driver doesn't change the +- * iwp->length, this ioctl just copies back max_token tokens +- * filled with zeroes. Hopefully the driver isn't claiming +- * them to be valid data. +- */ +- iwp->length = descr->max_tokens; +- } +- + err = handler(dev, info, (union iwreq_data *) iwp, extra); + + iwp->length += essid_compat; +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index cb81ca3..e15d49a 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) + hlist_add_head(&policy->bydst, chain); + xfrm_pol_hold(policy); + net->xfrm.policy_count[dir]++; +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + if (delpol) + __xfrm_policy_unlink(delpol, dir); + policy->index = delpol ? 
delpol->index : xfrm_gen_index(net, dir); +@@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir, + write_unlock_bh(&xfrm_policy_lock); + + if (ret && delete) { +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + xfrm_policy_kill(ret); + } + return ret; +@@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id, + write_unlock_bh(&xfrm_policy_lock); + + if (ret && delete) { +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + xfrm_policy_kill(ret); + } + return ret; +@@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) + } + + } +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + out: + write_unlock_bh(&xfrm_policy_lock); + return err; +@@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir) + write_unlock_bh(&xfrm_policy_lock); + if (pol) { + if (dir < XFRM_POLICY_MAX) +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + xfrm_policy_kill(pol); + return 0; + } +@@ -1477,7 +1477,7 @@ free_dst: + goto out; + } + +-static int inline ++static inline int + xfrm_dst_alloc_copy(void **target, void *src, int size) + { + if (!*target) { +@@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size) + return 0; + } + +-static int inline ++static inline int + xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel) + { + #ifdef CONFIG_XFRM_SUB_POLICY +@@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel) + #endif + } + +-static int inline ++static inline int + xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl) + { + #ifdef CONFIG_XFRM_SUB_POLICY +@@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, + u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); + + restart: +- genid = atomic_read(&flow_cache_genid); ++ genid = atomic_read_unchecked(&flow_cache_genid); + policy = NULL; + for (pi = 0; pi < ARRAY_SIZE(pols); pi++) + pols[pi] = NULL; +@@ -1680,7 +1680,7 @@ restart: + goto error; + } + if (nx == -EAGAIN || +- genid != atomic_read(&flow_cache_genid)) { ++ genid != atomic_read_unchecked(&flow_cache_genid)) { + xfrm_pols_put(pols, npols); + goto restart; + } +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index b95a2d6..85c4d78 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) + struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]; + int i; + ++ pax_track_stack(); ++ + if (xp->xfrm_nr == 0) + return 0; + +@@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, + int err; + int n = 0; + ++ pax_track_stack(); ++ + if (attrs[XFRMA_MIGRATE] == NULL) + return -EINVAL; + +diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c +index 45b7d56..19e828c 100644 +--- a/samples/kobject/kset-example.c ++++ b/samples/kobject/kset-example.c +@@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj, + } + + /* Our custom sysfs_ops that we will associate with our ktype later on */ +-static struct sysfs_ops foo_sysfs_ops = { ++static const struct sysfs_ops foo_sysfs_ops = { + .show = foo_attr_show, + .store = foo_attr_store, + }; +diff --git a/scripts/Makefile.build b/scripts/Makefile.build +index 341b589..405aed3 100644 +--- a/scripts/Makefile.build ++++ 
b/scripts/Makefile.build +@@ -59,7 +59,7 @@ endif + endif + + # Do not include host rules unless needed +-ifneq ($(hostprogs-y)$(hostprogs-m),) ++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),) + include scripts/Makefile.host + endif + +diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean +index 6f89fbb..53adc9c 100644 +--- a/scripts/Makefile.clean ++++ b/scripts/Makefile.clean +@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn)) + __clean-files := $(extra-y) $(always) \ + $(targets) $(clean-files) \ + $(host-progs) \ +- $(hostprogs-y) $(hostprogs-m) $(hostprogs-) ++ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \ ++ $(hostlibs-y) $(hostlibs-m) $(hostlibs-) + + # as clean-files is given relative to the current directory, this adds + # a $(obj) prefix, except for absolute paths +diff --git a/scripts/Makefile.host b/scripts/Makefile.host +index 1ac414f..a1c1451 100644 +--- a/scripts/Makefile.host ++++ b/scripts/Makefile.host +@@ -31,6 +31,7 @@ + # Note: Shared libraries consisting of C++ files are not supported + + __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m)) ++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m)) + + # C code + # Executables compiled from a single .c file +@@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs))) + # Shared libaries (only .c supported) + # Shared libraries (.so) - all .so files referenced in "xxx-objs" + host-cshlib := $(sort $(filter %.so, $(host-cobjs))) ++host-cshlib += $(sort $(filter %.so, $(__hostlibs))) + # Remove .so files from "xxx-objs" + host-cobjs := $(filter-out %.so,$(host-cobjs)) + +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib +index ffdafb2..4a55d60 100644 +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -145,15 +145,15 @@ __a_flags = $(call flags,_a_flags) + __cpp_flags = $(call flags,_cpp_flags) + endif + +-c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ ++c_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \ + $(__c_flags) $(modkern_cflags) \ + -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) \ + $(debug_flags) + +-a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ ++a_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \ + $(__a_flags) $(modkern_aflags) + +-cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ ++cpp_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \ + $(__cpp_flags) + + ld_flags = $(LDFLAGS) $(ldflags-y) +diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c +index 6bf21f8..c0546b3 100644 +--- a/scripts/basic/fixdep.c ++++ b/scripts/basic/fixdep.c +@@ -162,7 +162,7 @@ static void grow_config(int len) + /* + * Lookup a value in the configuration string. + */ +-static int is_defined_config(const char * name, int len) ++static int is_defined_config(const char * name, unsigned int len) + { + const char * pconfig; + const char * plast = str_config + len_config - len; +@@ -199,7 +199,7 @@ static void clear_config(void) + /* + * Record the use of a CONFIG_* word. 
+ */ +-static void use_config(char *m, int slen) ++static void use_config(char *m, unsigned int slen) + { + char s[PATH_MAX]; + char *p; +@@ -222,9 +222,9 @@ static void use_config(char *m, int slen) + + static void parse_config_file(char *map, size_t len) + { +- int *end = (int *) (map + len); ++ unsigned int *end = (unsigned int *) (map + len); + /* start at +1, so that p can never be < map */ +- int *m = (int *) map + 1; ++ unsigned int *m = (unsigned int *) map + 1; + char *p, *q; + + for (; m < end; m++) { +@@ -371,7 +371,7 @@ static void print_deps(void) + static void traps(void) + { + static char test[] __attribute__((aligned(sizeof(int)))) = "CONF"; +- int *p = (int *)test; ++ unsigned int *p = (unsigned int *)test; + + if (*p != INT_CONF) { + fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n", +diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh +new file mode 100644 +index 0000000..8729101 +--- /dev/null ++++ b/scripts/gcc-plugin.sh +@@ -0,0 +1,2 @@ ++#!/bin/sh ++echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y" +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c +index 62a9025..65b82ad 100644 +--- a/scripts/mod/file2alias.c ++++ b/scripts/mod/file2alias.c +@@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id, + unsigned long size, unsigned long id_size, + void *symval) + { +- int i; ++ unsigned int i; + + if (size % id_size || size < id_size) { + if (cross_build != 0) +@@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id, + /* USB is special because the bcdDevice can be matched against a numeric range */ + /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */ + static void do_usb_entry(struct usb_device_id *id, +- unsigned int bcdDevice_initial, int bcdDevice_initial_digits, ++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits, + unsigned char range_lo, unsigned char range_hi, + struct module *mod) + { +@@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod) + { + unsigned int devlo, devhi; + unsigned char chi, clo; +- int ndigits; ++ unsigned int ndigits; + + id->match_flags = TO_NATIVE(id->match_flags); + id->idVendor = TO_NATIVE(id->idVendor); +@@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size, + for (i = 0; i < count; i++) { + const char *id = (char *)devs[i].id; + char acpi_id[sizeof(devs[0].id)]; +- int j; ++ unsigned int j; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS(\"pnp:d%s*\");\n", id); +@@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size, + + for (j = 0; j < PNP_MAX_DEVICES; j++) { + const char *id = (char *)card->devs[j].id; +- int i2, j2; ++ unsigned int i2, j2; + int dup = 0; + + if (!id[0]) +@@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size, + /* add an individual alias for every device entry */ + if (!dup) { + char acpi_id[sizeof(card->devs[0].id)]; +- int k; ++ unsigned int k; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS(\"pnp:d%s*\");\n", id); +@@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s) + static int do_dmi_entry(const char *filename, struct dmi_system_id *id, + char *alias) + { +- int i, j; ++ unsigned int i, j; + + sprintf(alias, "dmi*"); + +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c 
+index 03efeab..f65608f 100644 +--- a/scripts/mod/modpost.c ++++ b/scripts/mod/modpost.c +@@ -764,7 +764,7 @@ static void check_section(const char *modname, struct elf_info *elf, + + #define ALL_INIT_DATA_SECTIONS \ + ".init.setup$", ".init.rodata$", \ +- ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$" \ ++ ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$", \ + ".init.data$", ".devinit.data$", ".cpuinit.data$", ".meminit.data$" + #define ALL_EXIT_DATA_SECTIONS \ + ".exit.data$", ".devexit.data$", ".cpuexit.data$", ".memexit.data$" +@@ -835,6 +835,7 @@ enum mismatch { + INIT_TO_EXIT, + EXIT_TO_INIT, + EXPORT_TO_INIT_EXIT, ++ DATA_TO_TEXT + }; + + struct sectioncheck { +@@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = { + .fromsec = { "__ksymtab*", NULL }, + .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, + .mismatch = EXPORT_TO_INIT_EXIT ++}, ++/* Do not reference code from writable data */ ++{ ++ .fromsec = { DATA_SECTIONS, NULL }, ++ .tosec = { TEXT_SECTIONS, NULL }, ++ .mismatch = DATA_TO_TEXT + } + }; + +@@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, + continue; + if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) + continue; +- if (sym->st_value == addr) +- return sym; + /* Find a symbol nearby - addr are maybe negative */ + d = sym->st_value - addr; ++ if (d == 0) ++ return sym; + if (d < 0) + d = addr - sym->st_value; + if (d < distance) { +@@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch, + "Fix this by removing the %sannotation of %s " + "or drop the export.\n", + tosym, sec2annotation(tosec), sec2annotation(tosec), tosym); ++ case DATA_TO_TEXT: ++/* ++ fprintf(stderr, ++ "The variable %s references\n" ++ "the %s %s%s%s\n", ++ fromsym, to, sec2annotation(tosec), tosym, to_p); ++*/ ++ break; + case NO_MISMATCH: + /* To get warnings on missing members */ + break; +@@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf, + static void check_sec_ref(struct module *mod, const char *modname, + struct elf_info *elf) + { +- int i; ++ unsigned int i; + Elf_Shdr *sechdrs = elf->sechdrs; + + /* Walk through all sections */ +@@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf, + va_end(ap); + } + +-void buf_write(struct buffer *buf, const char *s, int len) ++void buf_write(struct buffer *buf, const char *s, unsigned int len) + { + if (buf->size - buf->pos < len) { + buf->size += len + SZ; +@@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname) + if (fstat(fileno(file), &st) < 0) + goto close_write; + +- if (st.st_size != b->pos) ++ if (st.st_size != (off_t)b->pos) + goto close_write; + + tmp = NOFAIL(malloc(b->pos)); +diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h +index 09f58e3..4b66092 100644 +--- a/scripts/mod/modpost.h ++++ b/scripts/mod/modpost.h +@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr); + + struct buffer { + char *p; +- int pos; +- int size; ++ unsigned int pos; ++ unsigned int size; + }; + + void __attribute__((format(printf, 2, 3))) + buf_printf(struct buffer *buf, const char *fmt, ...); + + void +-buf_write(struct buffer *buf, const char *s, int len); ++buf_write(struct buffer *buf, const char *s, unsigned int len); + + struct module { + struct module *next; +diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c +index ecf9c7d..d52b38e 100644 +--- a/scripts/mod/sumversion.c ++++ 
b/scripts/mod/sumversion.c +@@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum, + goto out; + } + +- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) { ++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) { + warn("writing sum in %s failed: %s\n", + filename, strerror(errno)); + goto out; +diff --git a/scripts/package/mkspec b/scripts/package/mkspec +index 47bdd2f..d4d4e93 100755 +--- a/scripts/package/mkspec ++++ b/scripts/package/mkspec +@@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules' + echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware' + echo "%endif" + +-echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install' ++echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install' + echo "%ifarch ia64" + echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE" + echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/" +diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c +index 5c11312..72742b5 100644 +--- a/scripts/pnmtologo.c ++++ b/scripts/pnmtologo.c +@@ -237,14 +237,14 @@ static void write_header(void) + fprintf(out, " * Linux logo %s\n", logoname); + fputs(" */\n\n", out); + fputs("#include <linux/linux_logo.h>\n\n", out); +- fprintf(out, "static unsigned char %s_data[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_data[] = {\n", + logoname); + } + + static void write_footer(void) + { + fputs("\n};\n\n", out); +- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); ++ fprintf(out, "const struct linux_logo %s = {\n", logoname); + fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); + fprintf(out, "\t.width\t\t= %d,\n", logo_width); + fprintf(out, "\t.height\t\t= %d,\n", logo_height); +@@ -374,7 +374,7 @@ static void write_logo_clut224(void) + fputs("\n};\n\n", out); + + /* write logo clut */ +- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_clut[] = {\n", + logoname); + write_hex_cnt = 0; + for (i = 0; i < logo_clutsize; i++) { +diff --git a/scripts/tags.sh b/scripts/tags.sh +index d52f7a0..b66cdd9 100755 +--- a/scripts/tags.sh ++++ b/scripts/tags.sh +@@ -93,6 +93,11 @@ docscope() + cscope -b -f cscope.out + } + ++dogtags() ++{ ++ all_sources | gtags -i -f - ++} ++ + exuberant() + { + all_sources | xargs $1 -a \ +@@ -164,6 +169,10 @@ case "$1" in + docscope + ;; + ++ "gtags") ++ dogtags ++ ;; ++ + "tags") + rm -f tags + xtags ctags +diff --git a/security/Kconfig b/security/Kconfig +index fb363cd..c2c0a96 100644 +--- a/security/Kconfig ++++ b/security/Kconfig +@@ -4,6 +4,634 @@ + + menu "Security options" + ++source grsecurity/Kconfig ++ ++menu "PaX" ++ ++ config ARCH_TRACK_EXEC_LIMIT ++ bool ++ ++ config PAX_KERNEXEC_PLUGIN ++ bool ++ ++ config PAX_PER_CPU_PGD ++ bool ++ ++ config TASK_SIZE_MAX_SHIFT ++ int ++ depends on X86_64 ++ default 47 if !PAX_PER_CPU_PGD ++ default 42 if PAX_PER_CPU_PGD ++ ++ config PAX_ENABLE_PAE ++ bool ++ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM)) ++ ++config PAX ++ bool "Enable various PaX features" ++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) ++ help ++ This allows you to enable various PaX features. PaX adds ++ intrusion prevention mechanisms to the kernel that reduce ++ the risks posed by exploitable memory corruption bugs. 
++ ++menu "PaX Control" ++ depends on PAX ++ ++config PAX_SOFTMODE ++ bool 'Support soft mode' ++ help ++ Enabling this option will allow you to run PaX in soft mode, that ++ is, PaX features will not be enforced by default, only on executables ++ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS ++ support as they are the only way to mark executables for soft mode use. ++ ++ Soft mode can be activated by using the "pax_softmode=1" kernel command ++ line option on boot. Furthermore you can control various PaX features ++ at runtime via the entries in /proc/sys/kernel/pax. ++ ++config PAX_EI_PAX ++ bool 'Use legacy ELF header marking' ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'chpax' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ an otherwise reserved part of the ELF header. This marking has ++ numerous drawbacks (no support for soft-mode, toolchain does not ++ know about the non-standard use of the ELF header) therefore it ++ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS ++ support. ++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF program ++ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this ++ option otherwise they will not get any protection. ++ ++ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking ++ support as well, they will override the legacy EI_PAX marks. ++ ++config PAX_PT_PAX_FLAGS ++ bool 'Use ELF program header marking' ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'paxctl' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking ++ has the benefits of supporting both soft mode and being fully ++ integrated into the toolchain (the binutils patch is available ++ from http://pax.grsecurity.net). ++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF program ++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking ++ support otherwise they will not get any protection. ++ ++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you ++ must make sure that the marks are the same if a binary has both marks. ++ ++ Note that if you enable the legacy EI_PAX marking support as well, ++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks. ++ ++config PAX_XATTR_PAX_FLAGS ++ bool 'Use filesystem extended attributes marking' ++ select CIFS_XATTR if CIFS ++ select EXT2_FS_XATTR if EXT2_FS ++ select EXT3_FS_XATTR if EXT3_FS ++ select EXT4_FS_XATTR if EXT4_FS ++ select JFFS2_FS_XATTR if JFFS2_FS ++ select REISERFS_FS_XATTR if REISERFS_FS ++ select UBIFS_FS_XATTR if UBIFS_FS ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'setfattr' utility. The control ++ flags will be read from the user.pax.flags extended attribute of ++ the file. This marking has the benefit of supporting binary-only ++ applications that self-check themselves (e.g., skype) and would ++ not tolerate chpax/paxctl changes. The main drawback is that ++ extended attributes are not supported by some filesystems (e.g., ++ isofs, squashfs, tmpfs, udf, vfat) so copying files through such ++ filesystems will lose the extended attributes and these PaX markings. 
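As a concrete illustration of the marking workflows these help texts describe (a sketch only: paxctl's -c/-m switches are standard, but the exact user.pax.flags value string depends on the PaX userland conventions in use, so treat it as illustrative):

    # PT_PAX_FLAGS marking: -c converts the ELF file (adds a
    # PT_PAX_FLAGS program header), -m disables MPROTECT for it
    paxctl -c -m /usr/bin/skype

    # XATTR_PAX_FLAGS marking via the extended attribute named above
    setfattr -n user.pax.flags -v "m" /usr/bin/skype
    getfattr -n user.pax.flags /usr/bin/skype

Because the xattr lives in filesystem metadata rather than in the file's contents, copying through an xattr-less filesystem silently drops the mark, which is the drawback the help text warns about.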
++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF program ++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking ++ support otherwise they will not get any protection. ++ ++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you ++ must make sure that the marks are the same if a binary has both marks. ++ ++ Note that if you enable the legacy EI_PAX marking support as well, ++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks. ++ ++choice ++ prompt 'MAC system integration' ++ default PAX_HAVE_ACL_FLAGS ++ help ++ Mandatory Access Control systems have the option of controlling ++ PaX flags on a per executable basis, choose the method supported ++ by your particular system. ++ ++ - "none": if your MAC system does not interact with PaX, ++ - "direct": if your MAC system defines pax_set_initial_flags() itself, ++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback. ++ ++ NOTE: this option is for developers/integrators only. ++ ++ config PAX_NO_ACL_FLAGS ++ bool 'none' ++ ++ config PAX_HAVE_ACL_FLAGS ++ bool 'direct' ++ ++ config PAX_HOOK_ACL_FLAGS ++ bool 'hook' ++endchoice ++ ++endmenu ++ ++menu "Non-executable pages" ++ depends on PAX ++ ++config PAX_NOEXEC ++ bool "Enforce non-executable pages" ++ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86 ++ help ++ By design some architectures do not allow for protecting memory ++ pages against execution or even if they do, Linux does not make ++ use of this feature. In practice this means that if a page is ++ readable (such as the stack or heap) it is also executable. ++ ++ There is a well known exploit technique that makes use of this ++ fact and a common programming mistake where an attacker can ++ introduce code of his choice somewhere in the attacked program's ++ memory (typically the stack or the heap) and then execute it. ++ ++ If the attacked program was running with different (typically ++ higher) privileges than that of the attacker, then he can elevate ++ his own privilege level (e.g. get a root shell, write to files for ++ which he does not have write access to, etc). ++ ++ Enabling this option will let you choose from various features ++ that prevent the injection and execution of 'foreign' code in ++ a program. ++ ++ This will also break programs that rely on the old behaviour and ++ expect that dynamically allocated memory via the malloc() family ++ of functions is executable (which it is not). Notable examples ++ are the XFree86 4.x server, the java runtime and wine. ++ ++config PAX_PAGEEXEC ++ bool "Paging based non-executable pages" ++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7) ++ select S390_SWITCH_AMODE if S390 ++ select S390_EXEC_PROTECT if S390 ++ select ARCH_TRACK_EXEC_LIMIT if X86_32 ++ help ++ This implementation is based on the paging feature of the CPU. ++ On i386 without hardware non-executable bit support there is a ++ variable but usually low performance impact, however on Intel's ++ P4 core based CPUs it is very high so you should not enable this ++ for kernels meant to be used on such CPUs. ++ ++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386 ++ with hardware non-executable bit support there is no performance ++ impact, on ppc the impact is negligible. 
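To make the enforcement concrete, here is a minimal userland probe (illustrative, not part of the patch; x86 assumed for the single opcode) that writes one instruction into a PROT_READ|PROT_WRITE mapping and jumps to it:

    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        static const unsigned char code[] = { 0xc3 }; /* x86 'ret' */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        memcpy(p, code, sizeof(code));
        ((void (*)(void))p)(); /* faults under PAGEEXEC/SEGMEXEC */
        return 0;
    }

On hardware with a non-executable bit the indirect call faults natively; PAGEEXEC's value on i386 is providing the same guarantee where the CPU does not.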
++ ++ Note that several architectures require various emulations due to ++ badly designed userland ABIs, this will cause a performance impact ++ but will disappear as soon as userland is fixed. For example, ppc ++ userland MUST have been built with secure-plt by a recent toolchain. ++ ++config PAX_SEGMEXEC ++ bool "Segmentation based non-executable pages" ++ depends on PAX_NOEXEC && X86_32 ++ help ++ This implementation is based on the segmentation feature of the ++ CPU and has a very small performance impact, however applications ++ will be limited to a 1.5 GB address space instead of the normal ++ 3 GB. ++ ++config PAX_EMUTRAMP ++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86) ++ default y if PARISC ++ help ++ There are some programs and libraries that for one reason or ++ another attempt to execute special small code snippets from ++ non-executable memory pages. Most notable examples are the ++ signal handler return code generated by the kernel itself and ++ the GCC trampolines. ++ ++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then ++ such programs will no longer work under your kernel. ++ ++ As a remedy you can say Y here and use the 'chpax' or 'paxctl' ++ utilities to enable trampoline emulation for the affected programs ++ yet still have the protection provided by the non-executable pages. ++ ++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise ++ your system will not even boot. ++ ++ Alternatively you can say N here and use the 'chpax' or 'paxctl' ++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC ++ for the affected files. ++ ++ NOTE: enabling this feature *may* open up a loophole in the ++ protection provided by non-executable pages that an attacker ++ could abuse. Therefore the best solution is to not have any ++ files on your system that would require this option. This can ++ be achieved by not using libc5 (which relies on the kernel ++ signal handler return code) and not using or rewriting programs ++ that make use of the nested function implementation of GCC. ++ Skilled users can just fix GCC itself so that it implements ++ nested function calls in a way that does not interfere with PaX. ++ ++config PAX_EMUSIGRT ++ bool "Automatically emulate sigreturn trampolines" ++ depends on PAX_EMUTRAMP && PARISC ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate signal return trampolines executing on the stack ++ that would otherwise lead to task termination. ++ ++ This solution is intended as a temporary one for users with ++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17, ++ Modula-3 runtime, etc) or executables linked to such, basically ++ everything that does not specify its own SA_RESTORER function in ++ normal executable memory like glibc 2.1+ does. ++ ++ On parisc you MUST enable this option, otherwise your system will ++ not even boot. ++ ++ NOTE: this feature cannot be disabled on a per executable basis ++ and since it *does* open up a loophole in the protection provided ++ by non-executable pages, the best solution is to not have any ++ files on your system that would require this option. 
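The GCC nested-function case called out above is easy to reproduce: taking the address of a nested function that uses a variable of its enclosing scope makes gcc materialize a small trampoline on the stack, which is exactly the stack-resident code EMUTRAMP detects and emulates (illustrative, not part of the patch; nested functions are a GNU C extension, so gcc only):

    #include <stdio.h>

    int main(void)
    {
        int base = 41;
        int add(int x) { return base + x; } /* nested function */
        int (*fp)(int) = add;               /* forces a stack trampoline */

        printf("%d\n", fp(1));              /* prints 42 */
        return 0;
    }

Without EMUTRAMP, running this under PAGEEXEC or SEGMEXEC terminates the task at the indirect call.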
++ ++config PAX_MPROTECT ++ bool "Restrict mprotect()" ++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) ++ help ++ Enabling this option will prevent programs from ++ - changing the executable status of memory pages that were ++ not originally created as executable, ++ - making read-only executable pages writable again, ++ - creating executable pages from anonymous memory, ++ - making read-only-after-relocations (RELRO) data pages writable again. ++ ++ You should say Y here to complete the protection provided by ++ the enforcement of non-executable pages. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_MPROTECT_COMPAT ++ bool "Use legacy/compat protection demoting (read help)" ++ depends on PAX_MPROTECT ++ default n ++ help ++ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects ++ by sending the proper error code to the application. For some broken ++ userland, this can cause problems with Python or other applications. The ++ current implementation however allows for applications like clamav to ++ detect if JIT compilation/execution is allowed and to fall back gracefully ++ to an interpreter-based mode if it does not. While we encourage everyone ++ to use the current implementation as-is and push upstream to fix broken ++ userland (note that the RWX logging option can assist with this), in some ++ environments this may not be possible. Having to disable MPROTECT ++ completely on certain binaries reduces the security benefit of PaX, ++ so this option is provided for those environments to revert to the old ++ behavior. ++ ++config PAX_ELFRELOCS ++ bool "Allow ELF text relocations (read help)" ++ depends on PAX_MPROTECT ++ default n ++ help ++ Non-executable pages and mprotect() restrictions are effective ++ in preventing the introduction of new executable code into an ++ attacked task's address space. There remain only two venues ++ for this kind of attack: if the attacker can execute already ++ existing code in the attacked task then he can either have it ++ create and mmap() a file containing his code or have it mmap() ++ an already existing ELF library that does not have position ++ independent code in it and use mprotect() on it to make it ++ writable and copy his code there. While protecting against ++ the former approach is beyond PaX, the latter can be prevented ++ by having only PIC ELF libraries on one's system (which do not ++ need to relocate their code). If you are sure this is your case, ++ as is the case with all modern Linux distributions, then leave ++ this option disabled. You should say 'n' here. ++ ++config PAX_ETEXECRELOCS ++ bool "Allow ELF ET_EXEC text relocations" ++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC) ++ select PAX_ELFRELOCS ++ default y ++ help ++ On some architectures there are incorrectly created applications ++ that require text relocations and would not work without enabling ++ this option. If you are an alpha, ia64 or parisc user, you should ++ enable this option and disable it once you have made sure that ++ none of your applications need it. ++ ++config PAX_EMUPLT ++ bool "Automatically emulate ELF PLT" ++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC) ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate the Procedure Linkage Table entries in ELF files. ++ On some architectures such entries are in writable memory, and ++ become non-executable leading to task termination. 
Therefore ++ it is mandatory that you enable this option on alpha, parisc, ++ sparc and sparc64, otherwise your system would not even boot. ++ ++ NOTE: this feature *does* open up a loophole in the protection ++ provided by the non-executable pages, therefore the proper ++ solution is to modify the toolchain to produce a PLT that does ++ not need to be writable. ++ ++config PAX_DLRESOLVE ++ bool 'Emulate old glibc resolver stub' ++ depends on PAX_EMUPLT && SPARC ++ default n ++ help ++ This option is needed if userland has an old glibc (before 2.4) ++ that puts a 'save' instruction into the runtime generated resolver ++ stub that needs special emulation. ++ ++config PAX_KERNEXEC ++ bool "Enforce non-executable kernel pages" ++ depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN ++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE) ++ select PAX_KERNEXEC_PLUGIN if X86_64 ++ help ++ This is the kernel land equivalent of PAGEEXEC and MPROTECT, ++ that is, enabling this option will make it harder to inject ++ and execute 'foreign' code in kernel memory itself. ++ ++ Note that on x86_64 kernels there is a known regression when ++ this feature and KVM/VMX are both enabled in the host kernel. ++ ++choice ++ prompt "Return Address Instrumentation Method" ++ default PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ depends on PAX_KERNEXEC_PLUGIN ++ help ++ Select the method used to instrument function pointer dereferences. ++ Note that binary modules cannot be instrumented by this approach. ++ ++ config PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ bool "bts" ++ help ++ This method is compatible with binary only modules but has ++ a higher runtime overhead. ++ ++ config PAX_KERNEXEC_PLUGIN_METHOD_OR ++ bool "or" ++ depends on !PARAVIRT ++ help ++ This method is incompatible with binary only modules but has ++ a lower runtime overhead. ++endchoice ++ ++config PAX_KERNEXEC_PLUGIN_METHOD ++ string ++ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR ++ default "" ++ ++config PAX_KERNEXEC_MODULE_TEXT ++ int "Minimum amount of memory reserved for module code" ++ default "4" ++ depends on PAX_KERNEXEC && X86_32 && MODULES ++ help ++ Due to implementation details the kernel must reserve a fixed ++ amount of memory for module code at compile time that cannot be ++ changed at runtime. Here you can specify the minimum amount ++ in MB that will be reserved. Due to the same implementation ++ details this size will always be rounded up to the next 2/4 MB ++ boundary (depends on PAE) so the actually available memory for ++ module code will usually be more than this minimum. ++ ++ The default 4 MB should be enough for most users but if you have ++ an excessive number of modules (e.g., most distribution configs ++ compile many drivers as modules) or use huge modules such as ++ nvidia's kernel driver, you will need to adjust this amount. ++ A good rule of thumb is to look at your currently loaded kernel ++ modules and add up their sizes. ++ ++endmenu ++ ++menu "Address Space Layout Randomization" ++ depends on PAX ++ ++config PAX_ASLR ++ bool "Address Space Layout Randomization" ++ help ++ Many if not most exploit techniques rely on the knowledge of ++ certain addresses in the attacked program. The following options ++ will allow the kernel to apply a certain amount of randomization ++ to specific parts of the program thereby forcing an attacker to ++ guess them in most cases. 
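The randomization is easy to observe from userland (illustrative; each grep below is a freshly exec'd process, so it reports its own mappings):

    $ for i in 1 2 3; do grep -m1 libc /proc/self/maps; done

With PAX_RANDMMAP active the reported libc base differs on every iteration, and under PAX_RANDUSTACK the same holds for the stack mapping.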
Any failed guess will most likely crash ++ the attacked program which allows the kernel to detect such attempts ++ and react on them. PaX itself provides no reaction mechanisms, ++ instead it is strongly encouraged that you make use of Nergal's ++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's ++ (http://www.grsecurity.net/) built-in crash detection features or ++ develop one yourself. ++ ++ By saying Y here you can choose to randomize the following areas: ++ - top of the task's kernel stack ++ - top of the task's userland stack ++ - base address for mmap() requests that do not specify one ++ (this includes all libraries) ++ - base address of the main executable ++ ++ It is strongly recommended to say Y here as address space layout ++ randomization has negligible impact on performance yet it provides ++ a very effective protection. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_RANDKSTACK ++ bool "Randomize kernel stack base" ++ depends on X86_TSC && X86 ++ help ++ By saying Y here the kernel will randomize every task's kernel ++ stack on every system call. This will not only force an attacker ++ to guess it but also prevent him from making use of possible ++ leaked information about it. ++ ++ Since the kernel stack is a rather scarce resource, randomization ++ may cause unexpected stack overflows, therefore you should very ++ carefully test your system. Note that once enabled in the kernel ++ configuration, this feature cannot be disabled on a per file basis. ++ ++config PAX_RANDUSTACK ++ bool "Randomize user stack base" ++ depends on PAX_ASLR ++ help ++ By saying Y here the kernel will randomize every task's userland ++ stack. The randomization is done in two steps where the second ++ one may apply a big amount of shift to the top of the stack and ++ cause problems for programs that want to use lots of memory (more ++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is). ++ For this reason the second step can be controlled by 'chpax' or ++ 'paxctl' on a per file basis. ++ ++config PAX_RANDMMAP ++ bool "Randomize mmap() base" ++ depends on PAX_ASLR ++ help ++ By saying Y here the kernel will use a randomized base address for ++ mmap() requests that do not specify one themselves. As a result ++ all dynamically loaded libraries will appear at random addresses ++ and therefore be harder to exploit by a technique where an attacker ++ attempts to execute library code for his purposes (e.g. spawn a ++ shell from an exploited program that is running at an elevated ++ privilege level). ++ ++ Furthermore, if a program is relinked as a dynamic ELF file, its ++ base address will be randomized as well, completing the full ++ randomization of the address space layout. Attacking such programs ++ becomes a guess game. You can find an example of doing this at ++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at ++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz . ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this ++ feature on a per file basis. ++ ++endmenu ++ ++menu "Miscellaneous hardening features" ++ ++config PAX_MEMORY_SANITIZE ++ bool "Sanitize all freed memory" ++ depends on !HIBERNATION ++ help ++ By saying Y here the kernel will erase memory pages as soon as they ++ are freed. 
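For intuition, a userland analogue of the sanitize-on-free idea (a sketch under the assumption that per-allocation zeroing is wanted; the kernel option itself operates on whole pages at free time): clear the buffer before releasing it, while preventing the compiler from eliding the store:

    #include <stdlib.h>
    #include <string.h>

    /* a plain memset() right before free() may be removed as a dead
     * store; calling through a volatile function pointer keeps it */
    static void *(* const volatile memset_v)(void *, int, size_t) = memset;

    static void secure_free(void *p, size_t n)
    {
        if (p) {
            memset_v(p, 0, n);
            free(p);
        }
    }

    int main(void)
    {
        char *secret = malloc(32);

        if (!secret)
            return 1;
        /* ... use secret ... */
        secure_free(secret, 32);
        return 0;
    }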
This in turn reduces the lifetime of data stored in the ++ pages, making it less likely that sensitive information such as ++ passwords, cryptographic secrets, etc stay in memory for too long. ++ ++ This is especially useful for programs whose runtime is short, long ++ lived processes and the kernel itself benefit from this as long as ++ they operate on whole memory pages and ensure timely freeing of pages ++ that may hold sensitive information. ++ ++ The tradeoff is performance impact, on a single CPU system kernel ++ compilation sees a 3% slowdown, other systems and workloads may vary ++ and you are advised to test this feature on your expected workload ++ before deploying it. ++ ++ Note that this feature does not protect data stored in live pages, ++ e.g., process memory swapped to disk may stay there for a long time. ++ ++config PAX_MEMORY_STACKLEAK ++ bool "Sanitize kernel stack" ++ depends on X86 ++ help ++ By saying Y here the kernel will erase the kernel stack before it ++ returns from a system call. This in turn reduces the information ++ that a kernel stack leak bug can reveal. ++ ++ Note that such a bug can still leak information that was put on ++ the stack by the current system call (the one eventually triggering ++ the bug) but traces of earlier system calls on the kernel stack ++ cannot leak anymore. ++ ++ The tradeoff is performance impact, on a single CPU system kernel ++ compilation sees a 1% slowdown, other systems and workloads may vary ++ and you are advised to test this feature on your expected workload ++ before deploying it. ++ ++ Note: full support for this feature requires gcc with plugin support ++ so make sure your compiler is at least gcc 4.5.0. Using older gcc ++ versions means that functions with large enough stack frames may ++ leave uninitialized memory behind that may be exposed to a later ++ syscall leaking the stack. ++ ++config PAX_MEMORY_UDEREF ++ bool "Prevent invalid userland pointer dereference" ++ depends on X86 && !UML_X86 && !XEN ++ select PAX_PER_CPU_PGD if X86_64 ++ help ++ By saying Y here the kernel will be prevented from dereferencing ++ userland pointers in contexts where the kernel expects only kernel ++ pointers. This is both a useful runtime debugging feature and a ++ security measure that prevents exploiting a class of kernel bugs. ++ ++ The tradeoff is that some virtualization solutions may experience ++ a huge slowdown and therefore you should not enable this feature ++ for kernels meant to run in such environments. Whether a given VM ++ solution is affected or not is best determined by simply trying it ++ out, the performance impact will be obvious right on boot as this ++ mechanism engages from very early on. A good rule of thumb is that ++ VMs running on CPUs without hardware virtualization support (i.e., ++ the majority of IA-32 CPUs) will likely experience the slowdown. ++ ++config PAX_REFCOUNT ++ bool "Prevent various kernel object reference counter overflows" ++ depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86) ++ help ++ By saying Y here the kernel will detect and prevent overflowing ++ various (but not all) kinds of object reference counters. Such ++ overflows can normally occur due to bugs only and are often, if ++ not always, exploitable. ++ ++ The tradeoff is that data structures protected by an overflowed ++ refcount will never be freed and therefore will leak memory. 
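The bug class targeted here, sketched in 2.6.32-era kernel C (illustrative only, not code from this patch): if the get side can be driven 2^32 times without a matching put, the counter wraps, and a later put frees the object while other holders still use it:

    #include <asm/atomic.h>  /* atomic_t, atomic_inc, atomic_dec_and_test */
    #include <linux/slab.h>  /* kfree */

    struct obj {
        atomic_t refs;
        /* ... payload ... */
    };

    static void obj_get(struct obj *o)
    {
        atomic_inc(&o->refs);            /* unchecked: can wrap past INT_MAX */
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_dec_and_test(&o->refs))
            kfree(o);                    /* premature free after a wrap */
    }

PAX_REFCOUNT turns the wrap into a detected event at the cost of leaking the object, which is the tradeoff the help text describes.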
Note ++ that this leak also happens even without this protection but in ++ that case the overflow can eventually trigger the freeing of the ++ data structure while it is still being used elsewhere, resulting ++ in the exploitable situation that this feature prevents. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. ++ ++config PAX_USERCOPY ++ bool "Harden heap object copies between kernel and userland" ++ depends on X86 || PPC || SPARC || ARM ++ depends on GRKERNSEC && (SLAB || SLUB || SLOB) ++ help ++ By saying Y here the kernel will enforce the size of heap objects ++ when they are copied in either direction between the kernel and ++ userland, even if only a part of the heap object is copied. ++ ++ Specifically, this checking prevents information leaking from the ++ kernel heap during kernel to userland copies (if the kernel heap ++ object is otherwise fully initialized) and prevents kernel heap ++ overflows during userland to kernel copies. ++ ++ Note that the current implementation provides the strictest bounds ++ checks for the SLUB allocator. ++ ++ Enabling this option also enables per-slab cache protection against ++ data in a given cache being copied into/out of via userland ++ accessors. Though the whitelist of regions will be reduced over ++ time, it notably protects important data structures like task structs. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. ++ ++config PAX_SIZE_OVERFLOW ++ bool "Prevent various integer overflows in function size parameters" ++ depends on X86 ++ help ++ By saying Y here the kernel recomputes expressions of function ++ arguments marked by a size_overflow attribute with double integer ++ precision (DImode/TImode for 32/64 bit integer types). ++ ++ The recomputed argument is checked against INT_MAX and an event ++ is logged on overflow and the triggering process is killed. ++ ++ Homepage: ++ http://www.grsecurity.net/~ephox/overflow_plugin/ ++ ++endmenu ++ ++endmenu ++ + config KEYS + bool "Enable access key retention support" + help +@@ -146,7 +774,7 @@ config INTEL_TXT + config LSM_MMAP_MIN_ADDR + int "Low address space for LSM to protect from user allocation" + depends on SECURITY && SECURITY_SELINUX +- default 65536 ++ default 32768 + help + This is the portion of low virtual memory which should be protected + from userspace allocation. 
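This value interacts with the generic vm.mmap_min_addr sysctl (see the security/min_addr.c hunk later in this patch): the kernel enforces whichever floor is larger, and the DAC half can be inspected or raised on a live system:

    # read the DAC floor, in bytes
    sysctl vm.mmap_min_addr

    # raise it (root only; mapping below the effective floor then
    # additionally requires CAP_SYS_RAWIO)
    sysctl -w vm.mmap_min_addr=65536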
Keeping a user from writing to low pages +diff --git a/security/capability.c b/security/capability.c +index fce07a7..5f12858 100644 +--- a/security/capability.c ++++ b/security/capability.c +@@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule) + } + #endif /* CONFIG_AUDIT */ + +-struct security_operations default_security_ops = { ++struct security_operations default_security_ops __read_only = { + .name = "default", + }; + +diff --git a/security/commoncap.c b/security/commoncap.c +index fe30751..7702d78 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -27,6 +27,8 @@ + #include <linux/sched.h> + #include <linux/prctl.h> + #include <linux/securebits.h> ++#include <linux/syslog.h> ++#include <net/sock.h> + + /* + * If a non-root user executes a setuid-root binary in +@@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname) + } + } + ++#ifdef CONFIG_NET ++extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk); ++#endif ++ + int cap_netlink_send(struct sock *sk, struct sk_buff *skb) + { ++#ifdef CONFIG_NET ++ NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk); ++#else + NETLINK_CB(skb).eff_cap = current_cap(); ++#endif ++ + return 0; + } + +@@ -511,6 +522,11 @@ int cap_bprm_set_creds(struct linux_binprm *bprm) + } + skip: + ++ /* if we have fs caps, clear dangerous personality flags */ ++ if (!cap_issubset(new->cap_permitted, old->cap_permitted)) ++ bprm->per_clear |= PER_CLEAR_ON_SETID; ++ ++ + /* Don't let someone trace a set[ug]id/setpcap binary with the revised + * credentials unless they have the appropriate permit + */ +@@ -582,6 +598,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm) + { + const struct cred *cred = current_cred(); + ++ if (gr_acl_enable_at_secure()) ++ return 1; ++ + if (cred->uid != 0) { + if (bprm->cap_effective) + return 1; +@@ -956,13 +975,18 @@ error: + /** + * cap_syslog - Determine whether syslog function is permitted + * @type: Function requested ++ * @from_file: Whether this request came from an open file (i.e. /proc) + * + * Determine whether the current process is permitted to use a particular + * syslog function, returning 0 if permission is granted, -ve if not. 
+ */ +-int cap_syslog(int type) ++int cap_syslog(int type, bool from_file) + { +- if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN)) ++ /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */ ++ if (type != SYSLOG_ACTION_OPEN && from_file) ++ return 0; ++ if ((type != SYSLOG_ACTION_READ_ALL && ++ type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + return 0; + } +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index 165eb53..b1db4eb 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename, + extern spinlock_t ima_queue_lock; + + struct ima_h_table { +- atomic_long_t len; /* number of stored measurements in the list */ +- atomic_long_t violations; ++ atomic_long_unchecked_t len; /* number of stored measurements in the list */ ++ atomic_long_unchecked_t violations; + struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE]; + }; + extern struct ima_h_table ima_htable; +diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c +index 852bf85..35d6df3 100644 +--- a/security/integrity/ima/ima_api.c ++++ b/security/integrity/ima/ima_api.c +@@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename, + int result; + + /* can overflow, only indicator */ +- atomic_long_inc(&ima_htable.violations); ++ atomic_long_inc_unchecked(&ima_htable.violations); + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { +diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c +index 0c72c9c..433e29b 100644 +--- a/security/integrity/ima/ima_fs.c ++++ b/security/integrity/ima/ima_fs.c +@@ -27,12 +27,12 @@ + static int valid_policy = 1; + #define TMPBUFLEN 12 + static ssize_t ima_show_htable_value(char __user *buf, size_t count, +- loff_t *ppos, atomic_long_t *val) ++ loff_t *ppos, atomic_long_unchecked_t *val) + { + char tmpbuf[TMPBUFLEN]; + ssize_t len; + +- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val)); ++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val)); + return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); + } + +diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c +index e19316d..339f7ae 100644 +--- a/security/integrity/ima/ima_queue.c ++++ b/security/integrity/ima/ima_queue.c +@@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry) + INIT_LIST_HEAD(&qe->later); + list_add_tail_rcu(&qe->later, &ima_measurements); + +- atomic_long_inc(&ima_htable.len); ++ atomic_long_inc_unchecked(&ima_htable.len); + key = ima_hash_key(entry->digest); + hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); + return 0; +diff --git a/security/keys/keyring.c b/security/keys/keyring.c +index e031952..c9a535d 100644 +--- a/security/keys/keyring.c ++++ b/security/keys/keyring.c +@@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring, + ret = -EFAULT; + + for (loop = 0; loop < klist->nkeys; loop++) { ++ key_serial_t serial; + key = klist->keys[loop]; ++ serial = key->serial; + + tmp = sizeof(key_serial_t); + if (tmp > buflen) + tmp = buflen; + +- if (copy_to_user(buffer, +- &key->serial, +- tmp) != 0) ++ if (copy_to_user(buffer, &serial, tmp)) + goto error; + + buflen -= tmp; +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c +index 931cfda..e71808a 100644 +--- a/security/keys/process_keys.c ++++ b/security/keys/process_keys.c 
+@@ -208,7 +208,7 @@ static int install_process_keyring(void) + ret = install_process_keyring_to_cred(new); + if (ret < 0) { + abort_creds(new); +- return ret != -EEXIST ?: 0; ++ return ret != -EEXIST ? ret : 0; + } + + return commit_creds(new); +diff --git a/security/min_addr.c b/security/min_addr.c +index d9f9425..c28cef4 100644 +--- a/security/min_addr.c ++++ b/security/min_addr.c +@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; + */ + static void update_mmap_min_addr(void) + { ++#ifndef SPARC + #ifdef CONFIG_LSM_MMAP_MIN_ADDR + if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR) + mmap_min_addr = dac_mmap_min_addr; +@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void) + #else + mmap_min_addr = dac_mmap_min_addr; + #endif ++#endif + } + + /* +diff --git a/security/root_plug.c b/security/root_plug.c +index 2f7ffa6..0455400 100644 +--- a/security/root_plug.c ++++ b/security/root_plug.c +@@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm) + return 0; + } + +-static struct security_operations rootplug_security_ops = { ++static struct security_operations rootplug_security_ops __read_only = { + .bprm_check_security = rootplug_bprm_check_security, + }; + +diff --git a/security/security.c b/security/security.c +index c4c6732..7abf13b 100644 +--- a/security/security.c ++++ b/security/security.c +@@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1]; + extern struct security_operations default_security_ops; + extern void security_fixup_ops(struct security_operations *ops); + +-struct security_operations *security_ops; /* Initialized to NULL */ ++struct security_operations *security_ops __read_only; /* Initialized to NULL */ + + static inline int verify(struct security_operations *ops) + { +@@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops) + * If there is already a security module registered with the kernel, + * an error will be returned. Otherwise %0 is returned on success. + */ +-int register_security(struct security_operations *ops) ++int __init register_security(struct security_operations *ops) + { + if (verify(ops)) { + printk(KERN_DEBUG "%s could not verify " +@@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry) + return security_ops->quota_on(dentry); + } + +-int security_syslog(int type) ++int security_syslog(int type, bool from_file) + { +- return security_ops->syslog(type); ++ return security_ops->syslog(type, from_file); + } + + int security_settime(struct timespec *ts, struct timezone *tz) +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index a106754..ca3a589 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -76,6 +76,7 @@ + #include <linux/selinux.h> + #include <linux/mutex.h> + #include <linux/posix-timers.h> ++#include <linux/syslog.h> + + #include "avc.h" + #include "objsec.h" +@@ -131,7 +132,7 @@ int selinux_enabled = 1; + * Minimal support for a secondary security module, + * just to allow the use of the capability module. + */ +-static struct security_operations *secondary_ops; ++static struct security_operations *secondary_ops __read_only; + + /* Lists of inode and superblock security structures initialized + before the policy was loaded. 
*/ +@@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry) + return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); + } + +-static int selinux_syslog(int type) ++static int selinux_syslog(int type, bool from_file) + { + int rc; + +- rc = cap_syslog(type); ++ rc = cap_syslog(type, from_file); + if (rc) + return rc; + + switch (type) { +- case 3: /* Read last kernel messages */ +- case 10: /* Return size of the log buffer */ ++ case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ ++ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ + rc = task_has_system(current, SYSTEM__SYSLOG_READ); + break; +- case 6: /* Disable logging to console */ +- case 7: /* Enable logging to console */ +- case 8: /* Set level of messages printed to console */ ++ case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */ ++ case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */ ++ /* Set level of messages printed to console */ ++ case SYSLOG_ACTION_CONSOLE_LEVEL: + rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE); + break; +- case 0: /* Close log */ +- case 1: /* Open log */ +- case 2: /* Read from log */ +- case 4: /* Read/clear last kernel messages */ +- case 5: /* Clear ring buffer */ ++ case SYSLOG_ACTION_CLOSE: /* Close log */ ++ case SYSLOG_ACTION_OPEN: /* Open log */ ++ case SYSLOG_ACTION_READ: /* Read from log */ ++ case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */ ++ case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ + default: + rc = task_has_system(current, SYSTEM__SYSLOG_MOD); + break; +@@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer) + + #endif + +-static struct security_operations selinux_ops = { ++static struct security_operations selinux_ops __read_only = { + .name = "selinux", + + .ptrace_access_check = selinux_ptrace_access_check, +@@ -5841,7 +5843,9 @@ int selinux_disable(void) + avc_disable(); + + /* Reset security_ops to the secondary module, dummy or capability. */ ++ pax_open_kernel(); + security_ops = secondary_ops; ++ pax_close_kernel(); + + /* Unregister netfilter hooks. */ + selinux_nf_ip_exit(); +diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h +index 13128f9..c23c736 100644 +--- a/security/selinux/include/xfrm.h ++++ b/security/selinux/include/xfrm.h +@@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); + + static inline void selinux_xfrm_notify_policyload(void) + { +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + } + #else + static inline int selinux_xfrm_enabled(void) +diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c +index ff17820..d68084c 100644 +--- a/security/selinux/ss/services.c ++++ b/security/selinux/ss/services.c +@@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len) + int rc = 0; + struct policy_file file = { data, len }, *fp = &file; + ++ pax_track_stack(); ++ + if (!ss_initialized) { + avtab_cache_init(); + if (policydb_read(&policydb, fp)) { +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index c33b6bb..b51f19e 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp) + * + * Returns 0 on success, error code otherwise. 
+ */ +-static int smack_syslog(int type) ++static int smack_syslog(int type, bool from_file) + { + int rc; + char *sp = current_security(); + +- rc = cap_syslog(type); ++ rc = cap_syslog(type, from_file); + if (rc != 0) + return rc; + +@@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) + return 0; + } + +-struct security_operations smack_ops = { ++struct security_operations smack_ops __read_only = { + .name = "smack", + + .ptrace_access_check = smack_ptrace_access_check, +diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c +index 9548a09..9a5f384 100644 +--- a/security/tomoyo/tomoyo.c ++++ b/security/tomoyo/tomoyo.c +@@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred) + * tomoyo_security_ops is a "struct security_operations" which is used for + * registering TOMOYO. + */ +-static struct security_operations tomoyo_security_ops = { ++static struct security_operations tomoyo_security_ops __read_only = { + .name = "tomoyo", + .cred_alloc_blank = tomoyo_cred_alloc_blank, + .cred_prepare = tomoyo_cred_prepare, +diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c +index 84bb07d..c2ab6b6 100644 +--- a/sound/aoa/codecs/onyx.c ++++ b/sound/aoa/codecs/onyx.c +@@ -53,7 +53,7 @@ struct onyx { + spdif_locked:1, + analog_locked:1, + original_mute:2; +- int open_count; ++ local_t open_count; + struct codec_info *codec_info; + + /* mutex serializes concurrent access to the device +@@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii, + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count++; ++ local_inc(&onyx->open_count); + mutex_unlock(&onyx->mutex); + + return 0; +@@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii, + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count--; +- if (!onyx->open_count) ++ if (local_dec_and_test(&onyx->open_count)) + onyx->spdif_locked = onyx->analog_locked = 0; + mutex_unlock(&onyx->mutex); + +diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h +index ffd2025..df062c9 100644 +--- a/sound/aoa/codecs/onyx.h ++++ b/sound/aoa/codecs/onyx.h +@@ -11,6 +11,7 @@ + #include <linux/i2c.h> + #include <asm/pmac_low_i2c.h> + #include <asm/prom.h> ++#include <asm/local.h> + + /* PCM3052 register definitions */ + +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index d9c9635..bc0a5a2 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha + } + } else { + tmp = snd_pcm_oss_write2(substream, +- (const char __force *)buf, ++ (const char __force_kernel *)buf, + runtime->oss.period_bytes, 0); + if (tmp <= 0) + goto err; +@@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use + xfer += tmp; + runtime->oss.buffer_used -= tmp; + } else { +- tmp = snd_pcm_oss_read2(substream, (char __force *)buf, ++ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf, + runtime->oss.period_bytes, 0); + if (tmp <= 0) + goto err; +diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c +index 038232d..7dd9e5c 100644 +--- a/sound/core/pcm_compat.c ++++ b/sound/core/pcm_compat.c +@@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream, + int err; + + fs = snd_enter_user(); +- err = snd_pcm_delay(substream, &delay); ++ err = snd_pcm_delay(substream, 
(snd_pcm_sframes_t __force_user *)&delay); + snd_leave_user(fs); + if (err < 0) + return err; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index e6d2d97..4843949 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, + switch (substream->stream) { + case SNDRV_PCM_STREAM_PLAYBACK: + result = snd_pcm_playback_ioctl1(NULL, substream, cmd, +- (void __user *)arg); ++ (void __force_user *)arg); + break; + case SNDRV_PCM_STREAM_CAPTURE: + result = snd_pcm_capture_ioctl1(NULL, substream, cmd, +- (void __user *)arg); ++ (void __force_user *)arg); + break; + default: + result = -EINVAL; +diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c +index 1f99767..14636533 100644 +--- a/sound/core/seq/seq_device.c ++++ b/sound/core/seq/seq_device.c +@@ -63,7 +63,7 @@ struct ops_list { + int argsize; /* argument size */ + + /* operators */ +- struct snd_seq_dev_ops ops; ++ struct snd_seq_dev_ops *ops; + + /* registred devices */ + struct list_head dev_list; /* list of devices */ +@@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, + + mutex_lock(&ops->reg_mutex); + /* copy driver operators */ +- ops->ops = *entry; ++ ops->ops = entry; + ops->driver |= DRIVER_LOADED; + ops->argsize = argsize; + +@@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops) + dev->name, ops->id, ops->argsize, dev->argsize); + return -EINVAL; + } +- if (ops->ops.init_device(dev) >= 0) { ++ if (ops->ops->init_device(dev) >= 0) { + dev->status = SNDRV_SEQ_DEVICE_REGISTERED; + ops->num_init_devices++; + } else { +@@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops) + dev->name, ops->id, ops->argsize, dev->argsize); + return -EINVAL; + } +- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) { ++ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) { + dev->status = SNDRV_SEQ_DEVICE_FREE; + dev->driver_data = NULL; + ops->num_init_devices--; +diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c +index 9284829..ac8e8b2 100644 +--- a/sound/drivers/mts64.c ++++ b/sound/drivers/mts64.c +@@ -27,6 +27,7 @@ + #include <sound/initval.h> + #include <sound/rawmidi.h> + #include <sound/control.h> ++#include <asm/local.h> + + #define CARD_NAME "Miditerminal 4140" + #define DRIVER_NAME "MTS64" +@@ -65,7 +66,7 @@ struct mts64 { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ local_t open_count; + int current_midi_output_port; + int current_midi_input_port; + u8 mode[MTS64_NUM_INPUT_PORTS]; +@@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) + { + struct mts64 *mts = substream->rmidi->private_data; + +- if (mts->open_count == 0) { ++ if (local_read(&mts->open_count) == 0) { + /* We don't need a spinlock here, because this is just called + if the device has not been opened before. 
+ So there aren't any IRQs from the device */ +@@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) + + msleep(50); + } +- ++(mts->open_count); ++ local_inc(&mts->open_count); + + return 0; + } +@@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) + struct mts64 *mts = substream->rmidi->private_data; + unsigned long flags; + +- --(mts->open_count); +- if (mts->open_count == 0) { ++ if (local_dec_return(&mts->open_count) == 0) { + /* We need the spinlock_irqsave here because we can still + have IRQs at this point */ + spin_lock_irqsave(&mts->lock, flags); +@@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) + + msleep(500); + +- } else if (mts->open_count < 0) +- mts->open_count = 0; ++ } else if (local_read(&mts->open_count) < 0) ++ local_set(&mts->open_count, 0); + + return 0; + } +diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c +index 01997f2..cbc1195 100644 +--- a/sound/drivers/opl4/opl4_lib.c ++++ b/sound/drivers/opl4/opl4_lib.c +@@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); + MODULE_DESCRIPTION("OPL4 driver"); + MODULE_LICENSE("GPL"); + +-static void inline snd_opl4_wait(struct snd_opl4 *opl4) ++static inline void snd_opl4_wait(struct snd_opl4 *opl4) + { + int timeout = 10; + while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0) +diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c +index 60158e2..0a0cc1a 100644 +--- a/sound/drivers/portman2x4.c ++++ b/sound/drivers/portman2x4.c +@@ -46,6 +46,7 @@ + #include <sound/initval.h> + #include <sound/rawmidi.h> + #include <sound/control.h> ++#include <asm/local.h> + + #define CARD_NAME "Portman 2x4" + #define DRIVER_NAME "portman" +@@ -83,7 +84,7 @@ struct portman { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ local_t open_count; + int mode[PORTMAN_NUM_INPUT_PORTS]; + struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS]; + }; +diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c +index 02f79d2..8691d43 100644 +--- a/sound/isa/cmi8330.c ++++ b/sound/isa/cmi8330.c +@@ -173,7 +173,7 @@ struct snd_cmi8330 { + + struct snd_pcm *pcm; + struct snd_cmi8330_stream { +- struct snd_pcm_ops ops; ++ snd_pcm_ops_no_const ops; + snd_pcm_open_callback_t open; + void *private_data; /* sb or wss */ + } streams[2]; +diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c +index 733b014..56ce96f 100644 +--- a/sound/oss/sb_audio.c ++++ b/sound/oss/sb_audio.c +@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev, + buf16 = (signed short *)(localbuf + localoffs); + while (c) + { +- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c); ++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? 
LBUFCOPYSIZE : c); + if (copy_from_user(lbuf8, + userbuf+useroffs + p, + locallen)) +diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c +index 3136c88..28ad950 100644 +--- a/sound/oss/swarm_cs4297a.c ++++ b/sound/oss/swarm_cs4297a.c +@@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void) + { + struct cs4297a_state *s; + u32 pwr, id; +- mm_segment_t fs; + int rval; + #ifndef CONFIG_BCM_CS4297A_CSWARM + u64 cfg; +@@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void) + if (!rval) { + char *sb1250_duart_present; + ++#if 0 ++ mm_segment_t fs; + fs = get_fs(); + set_fs(KERNEL_DS); +-#if 0 + val = SOUND_MASK_LINE; + mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val); + for (i = 0; i < ARRAY_SIZE(initvol); i++) { + val = initvol[i].vol; + mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val); + } ++ set_fs(fs); + // cs4297a_write_ac97(s, 0x18, 0x0808); + #else + // cs4297a_write_ac97(s, 0x5e, 0x180); + cs4297a_write_ac97(s, 0x02, 0x0808); + cs4297a_write_ac97(s, 0x18, 0x0808); + #endif +- set_fs(fs); + + list_add(&s->list, &cs4297a_devs); + +diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c +index 78288db..0406809 100644 +--- a/sound/pci/ac97/ac97_codec.c ++++ b/sound/pci/ac97/ac97_codec.c +@@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device) + } + + /* build_ops to do nothing */ +-static struct snd_ac97_build_ops null_build_ops; ++static const struct snd_ac97_build_ops null_build_ops; + + #ifdef CONFIG_SND_AC97_POWER_SAVE + static void do_update_power(struct work_struct *work) +diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c +index eeb2e23..82bf625 100644 +--- a/sound/pci/ac97/ac97_patch.c ++++ b/sound/pci/ac97/ac97_patch.c +@@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = { ++static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = { + .build_spdif = patch_yamaha_ymf743_build_spdif, + .build_3d = patch_yamaha_ymf7x3_3d, + }; +@@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = { ++static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = { + .build_3d = patch_yamaha_ymf7x3_3d, + .build_post_spdif = patch_yamaha_ymf753_post_spdif + }; +@@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = { + .build_specific = patch_wolfson_wm9703_specific, + }; + +@@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = { + .build_specific = patch_wolfson_wm9704_specific, + }; + +@@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = { + .build_specific = patch_wolfson_wm9705_specific, + }; + +@@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = { + 
.build_specific = patch_wolfson_wm9711_specific, + }; + +@@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97) + } + #endif + +-static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = { + .build_specific = patch_wolfson_wm9713_specific, + .build_3d = patch_wolfson_wm9713_3d, + #ifdef CONFIG_PM +@@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = { ++static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = { + .build_3d = patch_sigmatel_stac9700_3d, + .build_specific = patch_sigmatel_stac97xx_specific + }; +@@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97) + return patch_sigmatel_stac97xx_specific(ac97); + } + +-static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = { ++static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = { + .build_3d = patch_sigmatel_stac9708_3d, + .build_specific = patch_sigmatel_stac9708_specific + }; +@@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = { ++static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = { + .build_3d = patch_sigmatel_stac9700_3d, + .build_specific = patch_sigmatel_stac9758_specific + }; +@@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_cirrus_ops = { ++static const struct snd_ac97_build_ops patch_cirrus_ops = { + .build_spdif = patch_cirrus_build_spdif + }; + +@@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_conexant_ops = { ++static const struct snd_ac97_build_ops patch_conexant_ops = { + .build_spdif = patch_conexant_build_spdif + }; + +@@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int + } + } + +-static struct snd_ac97_build_ops patch_ad1881_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1881_build_ops = { + #ifdef CONFIG_PM + .resume = ad18xx_resume + #endif +@@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_ad1885_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1885_build_ops = { + .build_specific = &patch_ad1885_specific, + #ifdef CONFIG_PM + .resume = ad18xx_resume +@@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_ad1886_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1886_build_ops = { + .build_specific = &patch_ad1886_specific, + #ifdef CONFIG_PM + .resume = ad18xx_resume +@@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97) + ARRAY_SIZE(snd_ac97_ad1981x_jack_sense)); + } + +-static struct snd_ac97_build_ops patch_ad1981a_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1981a_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1981a_specific, + #ifdef CONFIG_PM +@@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97) + ARRAY_SIZE(snd_ac97_ad1981x_jack_sense)); + } + +-static struct snd_ac97_build_ops patch_ad1981b_build_ops = { ++static const struct 
snd_ac97_build_ops patch_ad1981b_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1981b_specific, + #ifdef CONFIG_PM +@@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97) + return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls)); + } + +-static struct snd_ac97_build_ops patch_ad1888_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1888_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1888_specific, + #ifdef CONFIG_PM +@@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97) + return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1); + } + +-static struct snd_ac97_build_ops patch_ad1980_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1980_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1980_specific, + #ifdef CONFIG_PM +@@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97) + ARRAY_SIZE(snd_ac97_ad1985_controls)); + } + +-static struct snd_ac97_build_ops patch_ad1985_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1985_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1985_specific, + #ifdef CONFIG_PM +@@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97) + ARRAY_SIZE(snd_ac97_ad1985_controls)); + } + +-static struct snd_ac97_build_ops patch_ad1986_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1986_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1986_specific, + #ifdef CONFIG_PM +@@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_alc650_ops = { ++static const struct snd_ac97_build_ops patch_alc650_ops = { + .build_specific = patch_alc650_specific, + .update_jacks = alc650_update_jacks + }; +@@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_alc655_ops = { ++static const struct snd_ac97_build_ops patch_alc655_ops = { + .build_specific = patch_alc655_specific, + .update_jacks = alc655_update_jacks + }; +@@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_alc850_ops = { ++static const struct snd_ac97_build_ops patch_alc850_ops = { + .build_specific = patch_alc850_specific, + .update_jacks = alc850_update_jacks + }; +@@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97) + return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls)); + } + +-static struct snd_ac97_build_ops patch_cm9738_ops = { ++static const struct snd_ac97_build_ops patch_cm9738_ops = { + .build_specific = patch_cm9738_specific, + .update_jacks = cm9738_update_jacks + }; +@@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97) + return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif)); + } + +-static struct snd_ac97_build_ops patch_cm9739_ops = { ++static const struct snd_ac97_build_ops patch_cm9739_ops = { + .build_specific = patch_cm9739_specific, + .build_post_spdif = patch_cm9739_post_spdif, + .update_jacks = cm9739_update_jacks +@@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97) + return 
patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls)); + } + +-static struct snd_ac97_build_ops patch_cm9761_ops = { ++static const struct snd_ac97_build_ops patch_cm9761_ops = { + .build_specific = patch_cm9761_specific, + .build_post_spdif = patch_cm9761_post_spdif, + .update_jacks = cm9761_update_jacks +@@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97) + return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls)); + } + +-static struct snd_ac97_build_ops patch_cm9780_ops = { ++static const struct snd_ac97_build_ops patch_cm9780_ops = { + .build_specific = patch_cm9780_specific, + .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */ + }; +@@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_vt1616_ops = { ++static const struct snd_ac97_build_ops patch_vt1616_ops = { + .build_specific = patch_vt1616_specific + }; + +@@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_it2646_ops = { ++static const struct snd_ac97_build_ops patch_it2646_ops = { + .build_specific = patch_it2646_specific, + .update_jacks = it2646_update_jacks + }; +@@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_si3036_ops = { ++static const struct snd_ac97_build_ops patch_si3036_ops = { + .build_specific = patch_si3036_specific, + }; + +@@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97) + return 0; + } + +-static struct snd_ac97_build_ops patch_ucb1400_ops = { ++static const struct snd_ac97_build_ops patch_ucb1400_ops = { + .build_specific = patch_ucb1400_specific, + }; + +diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h +index 99552fb..4dcc2c5 100644 +--- a/sound/pci/hda/hda_codec.h ++++ b/sound/pci/hda/hda_codec.h +@@ -580,7 +580,7 @@ struct hda_bus_ops { + /* notify power-up/down from codec to controller */ + void (*pm_notify)(struct hda_bus *bus); + #endif +-}; ++} __no_const; + + /* template to pass to the bus constructor */ + struct hda_bus_template { +@@ -675,6 +675,7 @@ struct hda_codec_ops { + int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid); + #endif + }; ++typedef struct hda_codec_ops __no_const hda_codec_ops_no_const; + + /* record for amp information cache */ + struct hda_cache_head { +@@ -705,7 +706,7 @@ struct hda_pcm_ops { + struct snd_pcm_substream *substream); + int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec, + struct snd_pcm_substream *substream); +-}; ++} __no_const; + + /* PCM information for each substream */ + struct hda_pcm_stream { +@@ -760,7 +761,7 @@ struct hda_codec { + const char *modelname; /* model name for preset */ + + /* set by patch */ +- struct hda_codec_ops patch_ops; ++ hda_codec_ops_no_const patch_ops; + + /* PCM to create, set by patch_ops.build_pcms callback */ + unsigned int num_pcms; +diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c +index fb684f0..2b11cea 100644 +--- a/sound/pci/hda/patch_atihdmi.c ++++ b/sound/pci/hda/patch_atihdmi.c +@@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec) + */ + spec->multiout.dig_out_nid = CVT_NID; + +- codec->patch_ops = atihdmi_patch_ops; ++ memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops)); + + return 0; + } +diff --git 
a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c +index 7c23016..c5bfdd7 100644 +--- a/sound/pci/hda/patch_intelhdmi.c ++++ b/sound/pci/hda/patch_intelhdmi.c +@@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) + cp_ready); + + /* TODO */ +- if (cp_state) +- ; +- if (cp_ready) +- ; ++ if (cp_state) { ++ } ++ if (cp_ready) { ++ } + } + + +@@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec) + spec->multiout.dig_out_nid = cvt_nid; + + codec->spec = spec; +- codec->patch_ops = intel_hdmi_patch_ops; ++ memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops)); + + snd_hda_eld_proc_new(codec, &spec->sink_eld); + +diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c +index 6afdab0..68ed352 100644 +--- a/sound/pci/hda/patch_nvhdmi.c ++++ b/sound/pci/hda/patch_nvhdmi.c +@@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec) + spec->multiout.max_channels = 8; + spec->multiout.dig_out_nid = Nv_Master_Convert_nid; + +- codec->patch_ops = nvhdmi_patch_ops_8ch; ++ memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch)); + + return 0; + } +@@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec) + spec->multiout.max_channels = 2; + spec->multiout.dig_out_nid = Nv_Master_Convert_nid; + +- codec->patch_ops = nvhdmi_patch_ops_2ch; ++ memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch)); + + return 0; + } +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 2fcd70d..a143eaf 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -5220,7 +5220,7 @@ again: + snd_hda_codec_write_cache(codec, nid, 0, + AC_VERB_SET_CONNECT_SEL, num_dacs); + +- codec->patch_ops = stac92xx_patch_ops; ++ memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops)); + + codec->proc_widget_hook = stac92hd_proc_hook; + +@@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) + return -ENOMEM; + + codec->spec = spec; +- codec->patch_ops = stac92xx_patch_ops; ++ memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops)); + spec->num_pins = STAC92HD71BXX_NUM_PINS; + switch (codec->vendor_id) { + case 0x111d76b6: +diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h +index d063149..01599a4 100644 +--- a/sound/pci/ice1712/ice1712.h ++++ b/sound/pci/ice1712/ice1712.h +@@ -269,7 +269,7 @@ struct snd_ak4xxx_private { + unsigned int mask_flags; /* total mask bits */ + struct snd_akm4xxx_ops { + void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate); +- } ops; ++ } __no_const ops; + }; + + struct snd_ice1712_spdif { +@@ -285,7 +285,7 @@ struct snd_ice1712_spdif { + int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); + void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); + int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); +- } ops; ++ } __no_const ops; + }; + + +diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c +index 9e7d12e..3e3bc64 100644 +--- a/sound/pci/intel8x0m.c ++++ b/sound/pci/intel8x0m.c +@@ -1264,7 +1264,7 @@ static struct shortname_table { + { 0x5455, "ALi M5455" }, + { 0x746d, "AMD AMD8111" }, + #endif +- { 0 }, ++ { 0, }, + }; + + static int __devinit snd_intel8x0m_probe(struct pci_dev *pci, +diff --git a/sound/pci/ymfpci/ymfpci_main.c 
b/sound/pci/ymfpci/ymfpci_main.c +index 5518371..45cf7ac 100644 +--- a/sound/pci/ymfpci/ymfpci_main.c ++++ b/sound/pci/ymfpci/ymfpci_main.c +@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip) + if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) + break; + } +- if (atomic_read(&chip->interrupt_sleep_count)) { +- atomic_set(&chip->interrupt_sleep_count, 0); ++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + wake_up(&chip->interrupt_sleep); + } + __end: +@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) + continue; + init_waitqueue_entry(&wait, current); + add_wait_queue(&chip->interrupt_sleep, &wait); +- atomic_inc(&chip->interrupt_sleep_count); ++ atomic_inc_unchecked(&chip->interrupt_sleep_count); + schedule_timeout_uninterruptible(msecs_to_jiffies(50)); + remove_wait_queue(&chip->interrupt_sleep, &wait); + } +@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id) + snd_ymfpci_writel(chip, YDSXGR_MODE, mode); + spin_unlock(&chip->reg_lock); + +- if (atomic_read(&chip->interrupt_sleep_count)) { +- atomic_set(&chip->interrupt_sleep_count, 0); ++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + wake_up(&chip->interrupt_sleep); + } + } +@@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card, + spin_lock_init(&chip->reg_lock); + spin_lock_init(&chip->voice_lock); + init_waitqueue_head(&chip->interrupt_sleep); +- atomic_set(&chip->interrupt_sleep_count, 0); ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + chip->card = card; + chip->pci = pci; + chip->irq = -1; +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index 0a1b2f6..776bb19 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) + } + + /* ASoC PCM operations */ +-static struct snd_pcm_ops soc_pcm_ops = { ++static snd_pcm_ops_no_const soc_pcm_ops = { + .open = soc_pcm_open, + .close = soc_codec_close, + .hw_params = soc_pcm_hw_params, +diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c +index 79633ea..9732e90 100644 +--- a/sound/usb/usbaudio.c ++++ b/sound/usb/usbaudio.c +@@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream, + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: +- subs->ops.prepare = prepare_playback_urb; ++ *(void **)&subs->ops.prepare = prepare_playback_urb; + return 0; + case SNDRV_PCM_TRIGGER_STOP: + return deactivate_urbs(subs, 0, 0); + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: +- subs->ops.prepare = prepare_nodata_playback_urb; ++ *(void **)&subs->ops.prepare = prepare_nodata_playback_urb; + return 0; + default: + return -EINVAL; +@@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream, + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: +- subs->ops.retire = retire_capture_urb; ++ *(void **)&subs->ops.retire = retire_capture_urb; + return start_urbs(subs, substream->runtime); + case SNDRV_PCM_TRIGGER_STOP: + return deactivate_urbs(subs, 0, 0); + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: +- subs->ops.retire = retire_paused_capture_urb; ++ *(void **)&subs->ops.retire = retire_paused_capture_urb; + return 0; + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: +- subs->ops.retire = retire_capture_urb; ++ *(void **)&subs->ops.retire = retire_capture_urb; + 
return 0; + default: + return -EINVAL; +@@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) + /* for playback, submit the URBs now; otherwise, the first hwptr_done + * updates for all URBs would happen at the same time when starting */ + if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) { +- subs->ops.prepare = prepare_nodata_playback_urb; ++ *(void **)&subs->ops.prepare = prepare_nodata_playback_urb; + return start_urbs(subs, runtime); + } else + return 0; +@@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo + subs->direction = stream; + subs->dev = as->chip->dev; + if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) { +- subs->ops = audio_urb_ops[stream]; ++ memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops)); + } else { +- subs->ops = audio_urb_ops_high_speed[stream]; ++ memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops)); + switch (as->chip->usb_id) { + case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */ + case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */ + case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */ +- subs->ops.retire_sync = retire_playback_sync_urb_hs_emu; ++ *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu; + break; + } + } +diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile +new file mode 100644 +index 0000000..ca64170 +--- /dev/null ++++ b/tools/gcc/Makefile +@@ -0,0 +1,26 @@ ++#CC := gcc ++#PLUGIN_SOURCE_FILES := pax_plugin.c ++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES)) ++GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin) ++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99 ++ ++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb ++CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer ++ ++hostlibs-y := constify_plugin.so ++hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so ++hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so ++hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so ++hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so ++hostlibs-y += colorize_plugin.so ++hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so ++ ++always := $(hostlibs-y) ++ ++constify_plugin-objs := constify_plugin.o ++stackleak_plugin-objs := stackleak_plugin.o ++kallocstat_plugin-objs := kallocstat_plugin.o ++kernexec_plugin-objs := kernexec_plugin.o ++checker_plugin-objs := checker_plugin.o ++colorize_plugin-objs := colorize_plugin.o ++size_overflow_plugin-objs := size_overflow_plugin.o +diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c +new file mode 100644 +index 0000000..d41b5af +--- /dev/null ++++ b/tools/gcc/checker_plugin.c +@@ -0,0 +1,171 @@ ++/* ++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to implement various sparse (source code checker) features ++ * ++ * TODO: ++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch) ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" 
++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++#include "target.h" ++ ++extern void c_register_addr_space (const char *str, addr_space_t as); ++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t); ++extern enum machine_mode default_addr_space_address_mode (addr_space_t); ++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as); ++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as); ++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as); ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info checker_plugin_info = { ++ .version = "201111150100", ++}; ++ ++#define ADDR_SPACE_KERNEL 0 ++#define ADDR_SPACE_FORCE_KERNEL 1 ++#define ADDR_SPACE_USER 2 ++#define ADDR_SPACE_FORCE_USER 3 ++#define ADDR_SPACE_IOMEM 0 ++#define ADDR_SPACE_FORCE_IOMEM 0 ++#define ADDR_SPACE_PERCPU 0 ++#define ADDR_SPACE_FORCE_PERCPU 0 ++#define ADDR_SPACE_RCU 0 ++#define ADDR_SPACE_FORCE_RCU 0 ++ ++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC); ++} ++ ++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC); ++} ++ ++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_valid_pointer_mode(mode, as); ++} ++ ++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as) ++{ ++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC); ++} ++ ++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_legitimize_address(x, oldx, mode, as); ++} ++ ++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset) ++{ ++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ return subset == superset; ++} ++ ++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type) ++{ ++// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type)); ++// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type)); ++ ++ return op; ++} ++ ++static void register_checker_address_spaces(void *event_data, void *data) ++{ ++ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL); ++ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL); ++ 
c_register_addr_space("__user", ADDR_SPACE_USER); ++ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER); ++// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM); ++// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM); ++// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU); ++// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU); ++// c_register_addr_space("__rcu", ADDR_SPACE_RCU); ++// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU); ++ ++ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode; ++ targetm.addr_space.address_mode = checker_addr_space_address_mode; ++ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode; ++ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p; ++// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address; ++ targetm.addr_space.subset_p = checker_addr_space_subset_p; ++ targetm.addr_space.convert = checker_addr_space_convert; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info); ++ ++ for (i = 0; i < argc; ++i) ++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ ++ if (TARGET_64BIT == 0) ++ return 0; ++ ++ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c +new file mode 100644 +index 0000000..ee950d0 +--- /dev/null ++++ b/tools/gcc/colorize_plugin.c +@@ -0,0 +1,147 @@ ++/* ++ * Copyright 2012 by PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to colorize diagnostic output ++ * ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info colorize_plugin_info = { ++ .version = "201203092200", ++}; ++ ++#define GREEN "\033[32m\033[2m" ++#define LIGHTGREEN "\033[32m\033[1m" ++#define YELLOW "\033[33m\033[2m" ++#define LIGHTYELLOW "\033[33m\033[1m" ++#define RED "\033[31m\033[2m" ++#define LIGHTRED "\033[31m\033[1m" ++#define BLUE "\033[34m\033[2m" ++#define LIGHTBLUE "\033[34m\033[1m" ++#define BRIGHT "\033[m\033[1m" ++#define NORMAL "\033[m" ++ ++static diagnostic_starter_fn old_starter; ++static diagnostic_finalizer_fn old_finalizer; ++ ++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ const char *color; ++ char *newprefix; ++ ++ switch (diagnostic->kind) { ++ case DK_NOTE: ++ color = LIGHTBLUE; ++ break; ++ ++ case DK_PEDWARN: ++ case DK_WARNING: ++ color = LIGHTYELLOW; ++ break; ++ ++ case DK_ERROR: ++ 
case DK_FATAL: ++ case DK_ICE: ++ case DK_PERMERROR: ++ case DK_SORRY: ++ color = LIGHTRED; ++ break; ++ ++ default: ++ color = NORMAL; ++ } ++ ++ old_starter(context, diagnostic); ++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix)) ++ return; ++ pp_destroy_prefix(context->printer); ++ pp_set_prefix(context->printer, newprefix); ++} ++ ++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ old_finalizer(context, diagnostic); ++} ++ ++static void colorize_arm(void) ++{ ++ old_starter = diagnostic_starter(global_dc); ++ old_finalizer = diagnostic_finalizer(global_dc); ++ ++ diagnostic_starter(global_dc) = start_colorize; ++ diagnostic_finalizer(global_dc) = finalize_colorize; ++} ++ ++static unsigned int execute_colorize_rearm(void) ++{ ++ if (diagnostic_starter(global_dc) == start_colorize) ++ return 0; ++ ++ colorize_arm(); ++ return 0; ++} ++ ++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = { ++ .pass = { ++ .type = SIMPLE_IPA_PASS, ++ .name = "colorize_rearm", ++ .gate = NULL, ++ .execute = execute_colorize_rearm, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static void colorize_start_unit(void *gcc_data, void *user_data) ++{ ++ colorize_arm(); ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info colorize_rearm_pass_info = { ++ .pass = &pass_ipa_colorize_rearm.pass, ++ .reference_pass_name = "*free_lang_data", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info); ++ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info); ++ return 0; ++} +diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c +new file mode 100644 +index 0000000..88a7438 +--- /dev/null ++++ b/tools/gcc/constify_plugin.c +@@ -0,0 +1,303 @@ ++/* ++ * Copyright 2011 by Emese Revfy <re.emese@gmail.com> ++ * Copyright 2011 by PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification. 
++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/const_plugin/ ++ * ++ * Usage: ++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c ++ * $ gcc -fplugin=constify_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info const_plugin_info = { ++ .version = "201111150100", ++ .help = "no-constify\tturn off constification\n", ++}; ++ ++static void constify_type(tree type); ++static bool walk_struct(tree node); ++ ++static tree deconstify_type(tree old_type) ++{ ++ tree new_type, field; ++ ++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST); ++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type)); ++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field)) ++ DECL_FIELD_CONTEXT(field) = new_type; ++ TYPE_READONLY(new_type) = 0; ++ C_TYPE_FIELDS_READONLY(new_type) = 0; ++ return new_type; ++} ++ ++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ tree type; ++ ++ *no_add_attrs = true; ++ if (TREE_CODE(*node) == FUNCTION_DECL) { ++ error("%qE attribute does not apply to functions", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == VAR_DECL) { ++ error("%qE attribute does not apply to variables", name); ++ return NULL_TREE; ++ } ++ ++ if (TYPE_P(*node)) { ++ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE) ++ *no_add_attrs = false; ++ else ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ type = TREE_TYPE(*node); ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) { ++ error("%qE attribute is already applied to the type", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) { ++ error("%qE attribute used on type that is not constified", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL) { ++ TREE_TYPE(*node) = deconstify_type(type); ++ TREE_READONLY(*node) = 0; ++ return NULL_TREE; ++ } ++ ++ return NULL_TREE; ++} ++ ++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = true; ++ if (!TYPE_P(*node)) { ++ error("%qE attribute applies to types only", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ *no_add_attrs = false; ++ constify_type(*node); ++ return NULL_TREE; ++} ++ ++static struct attribute_spec no_const_attr = { ++ .name = "no_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = 
handle_no_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static struct attribute_spec do_const_attr = { ++ .name = "do_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_do_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&no_const_attr); ++ register_attribute(&do_const_attr); ++} ++ ++static void constify_type(tree type) ++{ ++ TYPE_READONLY(type) = 1; ++ C_TYPE_FIELDS_READONLY(type) = 1; ++} ++ ++static bool is_fptr(tree field) ++{ ++ tree ptr = TREE_TYPE(field); ++ ++ if (TREE_CODE(ptr) != POINTER_TYPE) ++ return false; ++ ++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE; ++} ++ ++static bool walk_struct(tree node) ++{ ++ tree field; ++ ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) ++ return false; ++ ++ if (TYPE_FIELDS(node) == NULL_TREE) ++ return false; ++ ++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) { ++ tree type = TREE_TYPE(field); ++ enum tree_code code = TREE_CODE(type); ++ if (code == RECORD_TYPE || code == UNION_TYPE) { ++ if (!(walk_struct(type))) ++ return false; ++ } else if (!is_fptr(field) && !TREE_READONLY(field)) ++ return false; ++ } ++ return true; ++} ++ ++static void finish_type(void *event_data, void *data) ++{ ++ tree type = (tree)event_data; ++ ++ if (type == NULL_TREE) ++ return; ++ ++ if (TYPE_READONLY(type)) ++ return; ++ ++ if (walk_struct(type)) ++ constify_type(type); ++} ++ ++static unsigned int check_local_variables(void); ++ ++struct gimple_opt_pass pass_local_variable = { ++ { ++ .type = GIMPLE_PASS, ++ .name = "check_local_variables", ++ .gate = NULL, ++ .execute = check_local_variables, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static unsigned int check_local_variables(void) ++{ ++ tree var; ++ referenced_var_iterator rvi; ++ ++#if BUILDING_GCC_VERSION == 4005 ++ FOR_EACH_REFERENCED_VAR(var, rvi) { ++#else ++ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) { ++#endif ++ tree type = TREE_TYPE(var); ++ ++ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var)) ++ continue; ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ ++ if (!TYPE_READONLY(type)) ++ continue; ++ ++// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var))) ++// continue; ++ ++// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) ++// continue; ++ ++ if (walk_struct(type)) { ++ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ bool constify = true; ++ ++ struct register_pass_info local_variable_pass_info = { ++ .pass = &pass_local_variable.pass, ++ .reference_pass_name = "*referenced_vars", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible 
gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!(strcmp(argv[i].key, "no-constify"))) { ++ constify = false; ++ continue; ++ } ++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info); ++ if (constify) { ++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c +new file mode 100644 +index 0000000..a5eabce +--- /dev/null ++++ b/tools/gcc/kallocstat_plugin.c +@@ -0,0 +1,167 @@ ++/* ++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to find the distribution of k*alloc sizes ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static const char * const kalloc_functions[] = { ++ "__kmalloc", ++ "kmalloc", ++ "kmalloc_large", ++ "kmalloc_node", ++ "kmalloc_order", ++ "kmalloc_order_trace", ++ "kmalloc_slab", ++ "kzalloc", ++ "kzalloc_node", ++}; ++ ++static struct plugin_info kallocstat_plugin_info = { ++ .version = "201111150100", ++}; ++ ++static unsigned int execute_kallocstat(void); ++ ++static struct gimple_opt_pass kallocstat_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kallocstat", ++ .gate = NULL, ++ .execute = execute_kallocstat, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static bool is_kalloc(const char *fnname) ++{ ++ size_t i; ++ ++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++) ++ if (!strcmp(fnname, kalloc_functions[i])) ++ return true; ++ return false; ++} ++ ++static unsigned int execute_kallocstat(void) ++{ ++ basic_block bb; ++ ++ // 1. 
loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: ++ tree fndecl, size; ++ gimple call_stmt; ++ const char *fnname; ++ ++ // is it a call ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fndecl = gimple_call_fndecl(call_stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (TREE_CODE(fndecl) != FUNCTION_DECL) ++ continue; ++ ++ // is it a call to k*alloc ++ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl)); ++ if (!is_kalloc(fnname)) ++ continue; ++ ++ // is the size arg the result of a simple const assignment ++ size = gimple_call_arg(call_stmt, 0); ++ while (true) { ++ gimple def_stmt; ++ expanded_location xloc; ++ size_t size_val; ++ ++ if (TREE_CODE(size) != SSA_NAME) ++ break; ++ def_stmt = SSA_NAME_DEF_STMT(size); ++ if (!def_stmt || !is_gimple_assign(def_stmt)) ++ break; ++ if (gimple_num_ops(def_stmt) != 2) ++ break; ++ size = gimple_assign_rhs1(def_stmt); ++ if (!TREE_CONSTANT(size)) ++ continue; ++ xloc = expand_location(gimple_location(def_stmt)); ++ if (!xloc.file) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ size_val = TREE_INT_CST_LOW(size); ++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line); ++ break; ++ } ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_node(stderr, "pax", fndecl, 4); ++ } ++ } ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info kallocstat_pass_info = { ++ .pass = &kallocstat_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info); ++ ++ return 0; ++} +diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c +new file mode 100644 +index 0000000..d8a8da2 +--- /dev/null ++++ b/tools/gcc/kernexec_plugin.c +@@ -0,0 +1,427 @@ ++/* ++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386 ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... 
++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info kernexec_plugin_info = { ++ .version = "201111291120", ++ .help = "method=[bts|or]\tinstrumentation method\n" ++}; ++ ++static unsigned int execute_kernexec_reload(void); ++static unsigned int execute_kernexec_fptr(void); ++static unsigned int execute_kernexec_retaddr(void); ++static bool kernexec_cmodel_check(void); ++ ++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *); ++static void (*kernexec_instrument_retaddr)(rtx); ++ ++static struct gimple_opt_pass kernexec_reload_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_reload", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_reload, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct gimple_opt_pass kernexec_fptr_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_fptr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_fptr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct rtl_opt_pass kernexec_retaddr_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "kernexec_retaddr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_retaddr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect ++ } ++}; ++ ++static bool kernexec_cmodel_check(void) ++{ ++ tree section; ++ ++ if (ix86_cmodel != CM_KERNEL) ++ return false; ++ ++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl)); ++ if (!section || !TREE_VALUE(section)) ++ return true; ++ ++ section = TREE_VALUE(TREE_VALUE(section)); ++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10)) ++ return true; ++ ++ return false; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered ++ */ ++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_movabs_stmt; ++ ++ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : ); ++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL); ++ gimple_asm_set_volatile(asm_movabs_stmt, true); ++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(asm_movabs_stmt); ++} ++ ++/* ++ * find all asm() stmts that clobber r10 and add a reload of r10 ++ */ ++static unsigned int execute_kernexec_reload(void) ++{ ++ basic_block bb; ++ ++ // 1. 
loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: __asm__ ("" : : : "r10"); ++ gimple asm_stmt; ++ size_t nclobbers; ++ ++ // is it an asm ... ++ asm_stmt = gsi_stmt(gsi); ++ if (gimple_code(asm_stmt) != GIMPLE_ASM) ++ continue; ++ ++ // ... clobbering r10 ++ nclobbers = gimple_asm_nclobbers(asm_stmt); ++ while (nclobbers--) { ++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers); ++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10")) ++ continue; ++ kernexec_reload_fptr_mask(&gsi); ++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO); ++ break; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce ++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference ++ */ ++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi) ++{ ++ gimple assign_intptr, assign_new_fptr, call_stmt; ++ tree intptr, old_fptr, new_fptr, kernexec_mask; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary unsigned long variable used for bitops and cast fptr to it ++ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts"); ++ add_referenced_var(intptr); ++ mark_sym_for_renaming(intptr); ++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // apply logical or to temporary unsigned long and bitmask ++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL); ++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL); ++ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // cast temporary unsigned long back to a temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr)); ++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT); ++ update_stmt(assign_new_fptr); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_or_stmt, call_stmt; ++ tree old_fptr, new_fptr, input, output; ++ VEC(tree, gc) *inputs = NULL; ++ VEC(tree, gc) *outputs = NULL; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ ++ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr)); ++ input = build_tree_list(NULL_TREE, build_string(2, "0")); ++ input = chainon(NULL_TREE, build_tree_list(input, old_fptr)); ++ output = build_tree_list(NULL_TREE, build_string(3, "=r")); ++ output = chainon(NULL_TREE, build_tree_list(output, new_fptr)); ++ VEC_safe_push(tree, gc, inputs, input); ++ VEC_safe_push(tree, gc, outputs, output); ++ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL); ++ 
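// [editorial aside] the TREE_LISTs built above encode the operands of the
++	// extended asm: each list node pairs a constraint string ("=r" for the
++	// output, the matching constraint "0" for the input) with its operand,
++	// so the statement corresponds to
++	//	asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
++	// %r10 is assumed to hold the 0x8000000000000000 mask at all times: the
++	// "or" method reserves the register via fix_register() in plugin_init
++	// and reloads it after any asm that clobbers it (execute_kernexec_reload).
++	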
gimple_asm_set_volatile(asm_or_stmt, true); ++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT); ++ update_stmt(asm_or_stmt); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++/* ++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer ++ */ ++static unsigned int execute_kernexec_fptr(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); ++ tree fn; ++ gimple call_stmt; ++ ++ // is it a call ... ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fn = gimple_call_fn(call_stmt); ++ if (TREE_CODE(fn) == ADDR_EXPR) ++ continue; ++ if (TREE_CODE(fn) != SSA_NAME) ++ gcc_unreachable(); ++ ++ // ... through a function pointer ++ fn = SSA_NAME_VAR(fn); ++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != POINTER_TYPE) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != FUNCTION_TYPE) ++ continue; ++ ++ kernexec_instrument_fptr(&gsi); ++ ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++ } ++ } ++ ++ return 0; ++} ++ ++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn ++static void kernexec_instrument_retaddr_bts(rtx insn) ++{ ++ rtx btsq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("btsq $63,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(btsq) = 1; ++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(btsq, insn); ++} ++ ++// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn ++static void kernexec_instrument_retaddr_or(rtx insn) ++{ ++ rtx orq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("orq %%r10,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(orq) = 1; ++// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(orq, insn); ++} ++ ++/* ++ * find all asm level function returns and forcibly set the highest bit of the return address ++ */ ++static unsigned int execute_kernexec_retaddr(void) ++{ ++ rtx insn; ++ ++ // 1. 
find function returns
++	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++		// rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
++		//            (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
++		rtx body;
++
++		// is it a retn
++		if (!JUMP_P(insn))
++			continue;
++		body = PATTERN(insn);
++		if (GET_CODE(body) == PARALLEL)
++			body = XVECEXP(body, 0, 0);
++		if (GET_CODE(body) != RETURN)
++			continue;
++		kernexec_instrument_retaddr(insn);
++	}
++
++//	print_simple_rtl(stderr, get_insns());
++//	print_rtl(stderr, get_insns());
++
++	return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++	const char * const plugin_name = plugin_info->base_name;
++	const int argc = plugin_info->argc;
++	const struct plugin_argument * const argv = plugin_info->argv;
++	int i;
++	struct register_pass_info kernexec_reload_pass_info = {
++		.pass = &kernexec_reload_pass.pass,
++		.reference_pass_name = "ssa",
++		.ref_pass_instance_number = 0,
++		.pos_op = PASS_POS_INSERT_AFTER
++	};
++	struct register_pass_info kernexec_fptr_pass_info = {
++		.pass = &kernexec_fptr_pass.pass,
++		.reference_pass_name = "ssa",
++		.ref_pass_instance_number = 0,
++		.pos_op = PASS_POS_INSERT_AFTER
++	};
++	struct register_pass_info kernexec_retaddr_pass_info = {
++		.pass = &kernexec_retaddr_pass.pass,
++		.reference_pass_name = "pro_and_epilogue",
++		.ref_pass_instance_number = 0,
++		.pos_op = PASS_POS_INSERT_AFTER
++	};
++
++	if (!plugin_default_version_check(version, &gcc_version)) {
++		error(G_("incompatible gcc/plugin versions"));
++		return 1;
++	}
++
++	register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
++
++	if (TARGET_64BIT == 0)
++		return 0;
++
++	for (i = 0; i < argc; ++i) {
++		if (!strcmp(argv[i].key, "method")) {
++			if (!argv[i].value) {
++				error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++				continue;
++			}
++			if (!strcmp(argv[i].value, "bts")) {
++				kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
++				kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
++			} else if (!strcmp(argv[i].value, "or")) {
++				kernexec_instrument_fptr = kernexec_instrument_fptr_or;
++				kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
++				fix_register("r10", 1, 1);
++			} else
++				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++			continue;
++		}
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++	}
++	if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
++		error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
++
++	if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
++		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
++	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
++	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
++
++	return 0;
++}
+diff --git a/tools/gcc/size_overflow_hash.h b/tools/gcc/size_overflow_hash.h
+new file mode 100644
+index 0000000..6e18418
+--- /dev/null
++++ b/tools/gcc/size_overflow_hash.h
+@@ -0,0 +1,10651 @@
++struct size_overflow_hash _000001_hash = {
++	.next = NULL,
++	.name = "__alloc_percpu",
++	.file = "include/linux/percpu.h",
++	.param1 = 1,
++};
++
++struct size_overflow_hash _000002_hash = {
++	.next = NULL,
++	
.name = "kmalloc_node", ++ .file = "include/linux/slab.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000003_hash = { ++ .next = NULL, ++ .name = "alloc_dr", ++ .file = "drivers/base/devres.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000004_hash = { ++ .next = NULL, ++ .name = "__copy_from_user", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000005_hash = { ++ .next = NULL, ++ .name = "__copy_from_user_inatomic", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000006_hash = { ++ .next = NULL, ++ .name = "__copy_from_user_nocache", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000007_hash = { ++ .next = NULL, ++ .name = "__copy_to_user_inatomic", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000008_hash = { ++ .next = NULL, ++ .name = "do_xip_mapping_read", ++ .file = "mm/filemap_xip.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000009_hash = { ++ .next = NULL, ++ .name = "hugetlbfs_read", ++ .file = "fs/hugetlbfs/inode.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000010_hash = { ++ .next = NULL, ++ .name = "kmalloc", ++ .file = "include/linux/slub_def.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000011_hash = { ++ .next = NULL, ++ .name = "__kmalloc", ++ .file = "include/linux/slub_def.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000012_hash = { ++ .next = NULL, ++ .name = "kmalloc_slab", ++ .file = "include/linux/slub_def.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000013_hash = { ++ .next = NULL, ++ .name = "kmemdup", ++ .file = "include/linux/string.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000014_hash = { ++ .next = NULL, ++ .name = "__krealloc", ++ .file = "include/linux/slab.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000015_hash = { ++ .next = NULL, ++ .name = "memdup_user", ++ .file = "include/linux/string.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000016_hash = { ++ .next = NULL, ++ .name = "read_default_ldt", ++ .file = "arch/x86/kernel/ldt.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000017_hash = { ++ .next = NULL, ++ .name = "read_kcore", ++ .file = "fs/proc/kcore.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000018_hash = { ++ .next = NULL, ++ .name = "read_ldt", ++ .file = "arch/x86/kernel/ldt.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000019_hash = { ++ .next = NULL, ++ .name = "read_zero", ++ .file = "drivers/char/mem.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000020_hash = { ++ .next = NULL, ++ .name = "snmp_mib_init", ++ .file = "include/net/ip.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000021_hash = { ++ .next = NULL, ++ .name = "vmalloc", ++ .file = "include/linux/vmalloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000022_hash = { ++ .next = NULL, ++ .name = "__vmalloc", ++ .file = "include/linux/vmalloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000023_hash = { ++ .next = NULL, ++ .name = "vmalloc_32", ++ .file = "include/linux/vmalloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000024_hash = { ++ .next = NULL, ++ .name = "vmalloc_32_user", ++ .file = "include/linux/vmalloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000025_hash = { ++ .next = NULL, ++ .name = "vmalloc_exec", ++ .file = "include/linux/vmalloc.h", ++ 
.param1 = 1, ++}; ++ ++struct size_overflow_hash _000026_hash = { ++ .next = NULL, ++ .name = "vmalloc_node", ++ .file = "include/linux/vmalloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000027_hash = { ++ .next = NULL, ++ .name = "vmalloc_user", ++ .file = "include/linux/vmalloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000028_hash = { ++ .next = NULL, ++ .name = "vm_map_ram", ++ .file = "include/linux/vmalloc.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000029_hash = { ++ .next = NULL, ++ .name = "acpi_os_allocate", ++ .file = "include/acpi/platform/aclinux.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000030_hash = { ++ .next = NULL, ++ .name = "ahash_setkey_unaligned", ++ .file = "crypto/ahash.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000031_hash = { ++ .next = NULL, ++ .name = "alloc_fdmem", ++ .file = "fs/file.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000032_hash = { ++ .next = NULL, ++ .name = "alloc_large_system_hash", ++ .file = "include/linux/bootmem.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000033_hash = { ++ .next = NULL, ++ .name = "audit_unpack_string", ++ .file = "kernel/auditfilter.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000034_hash = { ++ .next = NULL, ++ .name = "blkcipher_copy_iv", ++ .file = "crypto/blkcipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000035_hash = { ++ .next = NULL, ++ .name = "blkcipher_next_slow", ++ .file = "crypto/blkcipher.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000036_hash = { ++ .next = NULL, ++ .name = "cgroup_write_string", ++ .file = "kernel/cgroup.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000037_hash = { ++ .next = NULL, ++ .name = "copy_from_user", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000038_hash = { ++ .next = NULL, ++ .name = "__copy_to_user", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000039_hash = { ++ .next = NULL, ++ .name = "devm_kzalloc", ++ .file = "include/linux/device.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000040_hash = { ++ .next = NULL, ++ .name = "devres_alloc", ++ .file = "include/linux/device.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000041_hash = { ++ .next = NULL, ++ .name = "do_ip_setsockopt", ++ .file = "net/ipv4/ip_sockglue.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000042_hash = { ++ .next = NULL, ++ .name = "do_tty_write", ++ .file = "drivers/char/tty_io.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000043_hash = { ++ .next = NULL, ++ .name = "file_read_actor", ++ .file = "include/linux/fs.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000044_hash = { ++ .next = NULL, ++ .name = "hashtab_create", ++ .file = "security/selinux/ss/hashtab.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000045_hash = { ++ .next = NULL, ++ .name = "heap_init", ++ .file = "include/linux/prio_heap.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000046_hash = { ++ .next = NULL, ++ .name = "ima_write_policy", ++ .file = "security/integrity/ima/ima_fs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000047_hash = { ++ .next = NULL, ++ .name = "iov_iter_copy_from_user", ++ .file = "include/linux/fs.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000048_hash = { ++ .next = NULL, ++ .name = "iov_iter_copy_from_user_atomic", ++ .file = "include/linux/fs.h", ++ .param4 = 
1, ++}; ++ ++struct size_overflow_hash _000049_hash = { ++ .next = NULL, ++ .name = "kcalloc", ++ .file = "include/linux/slab.h", ++ .param1 = 1, ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000051_hash = { ++ .next = NULL, ++ .name = "keyctl_instantiate_key", ++ .file = "security/keys/keyctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000052_hash = { ++ .next = NULL, ++ .name = "keyctl_update_key", ++ .file = "security/keys/keyctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000053_hash = { ++ .next = NULL, ++ .name = "kfifo_alloc", ++ .file = "include/linux/kfifo.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000054_hash = { ++ .next = NULL, ++ .name = "kmsg_write", ++ .file = "drivers/char/mem.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000055_hash = { ++ .next = NULL, ++ .name = "kobj_map", ++ .file = "include/linux/kobj_map.h", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000057_hash = { ++ .next = NULL, ++ .name = "krealloc", ++ .file = "include/linux/slab.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000058_hash = { ++ .next = NULL, ++ .name = "kzalloc", ++ .file = "include/linux/slab.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000059_hash = { ++ .next = NULL, ++ .name = "listxattr", ++ .file = "fs/xattr.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000060_hash = { ++ .next = NULL, ++ .name = "load_module", ++ .file = "kernel/module.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000061_hash = { ++ .next = NULL, ++ .name = "mempool_kmalloc", ++ .file = "include/linux/mempool.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000062_hash = { ++ .next = NULL, ++ .name = "pipe_iov_copy_from_user", ++ .file = "fs/pipe.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000063_hash = { ++ .next = NULL, ++ .name = "pipe_iov_copy_to_user", ++ .file = "fs/pipe.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000064_hash = { ++ .next = NULL, ++ .name = "platform_device_add_data", ++ .file = "include/linux/platform_device.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000065_hash = { ++ .next = NULL, ++ .name = "platform_device_add_resources", ++ .file = "include/linux/platform_device.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000066_hash = { ++ .next = NULL, ++ .name = "probe_kernel_read", ++ .file = "include/linux/uaccess.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000067_hash = { ++ .next = NULL, ++ .name = "qdisc_class_hash_alloc", ++ .file = "net/sched/sch_api.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000068_hash = { ++ .next = NULL, ++ .name = "read", ++ .file = "fs/sysfs/bin.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000069_hash = { ++ .next = NULL, ++ .name = "regset_tls_set", ++ .file = "arch/x86/kernel/tls.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000070_hash = { ++ .next = NULL, ++ .name = "relay_alloc_page_array", ++ .file = "kernel/relay.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000071_hash = { ++ .next = NULL, ++ .name = "request_key_auth_new", ++ .file = "security/keys/request_key_auth.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000072_hash = { ++ .next = NULL, ++ .name = "restore_i387_fxsave", ++ .file = "arch/x86/kernel/i387.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000073_hash = { ++ .next = NULL, ++ .name = "rngapi_reset", ++ .file = "crypto/rng.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash 
_000074_hash = { ++ .next = NULL, ++ .name = "rw_copy_check_uvector", ++ .file = "include/linux/fs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000075_hash = { ++ .next = NULL, ++ .name = "security_context_to_sid_core", ++ .file = "security/selinux/ss/services.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000076_hash = { ++ .next = NULL, ++ .name = "sel_write_load", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000077_hash = { ++ .next = NULL, ++ .name = "setkey_unaligned", ++ .file = "crypto/ablkcipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000078_hash = { ++ .next = NULL, ++ .name = "setkey_unaligned", ++ .file = "crypto/blkcipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000079_hash = { ++ .next = NULL, ++ .name = "setkey_unaligned", ++ .file = "crypto/aead.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000080_hash = { ++ .next = NULL, ++ .name = "setkey_unaligned", ++ .file = "crypto/cipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000081_hash = { ++ .next = NULL, ++ .name = "setxattr", ++ .file = "fs/xattr.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000082_hash = { ++ .next = NULL, ++ .name = "sg_kmalloc", ++ .file = "lib/scatterlist.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000083_hash = { ++ .next = NULL, ++ .name = "shash_setkey_unaligned", ++ .file = "crypto/shash.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000084_hash = { ++ .next = NULL, ++ .name = "spi_register_board_info", ++ .file = "include/linux/spi/spi.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000085_hash = { ++ .next = NULL, ++ .name = "swap_cgroup_swapon", ++ .file = "include/linux/page_cgroup.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000086_hash = { ++ .next = NULL, ++ .name = "sys_add_key", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000087_hash = { ++ .next = NULL, ++ .name = "sys_modify_ldt", ++ .file = "arch/x86/include/asm/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000088_hash = { ++ .next = NULL, ++ .name = "sys_semtimedop", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000089_hash = { ++ .next = NULL, ++ .name = "tty_buffer_alloc", ++ .file = "drivers/char/tty_buffer.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000090_hash = { ++ .next = NULL, ++ .name = "user_instantiate", ++ .file = "include/keys/user-type.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000091_hash = { ++ .next = NULL, ++ .name = "user_update", ++ .file = "include/keys/user-type.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000092_hash = { ++ .next = NULL, ++ .name = "vc_do_resize", ++ .file = "drivers/char/vt.c", ++ .param3 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000094_hash = { ++ .next = NULL, ++ .name = "vga_arb_write", ++ .file = "drivers/gpu/vga/vgaarb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000095_hash = { ++ .next = NULL, ++ .name = "write", ++ .file = "fs/sysfs/bin.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000096_hash = { ++ .next = NULL, ++ .name = "xfrm_hash_alloc", ++ .file = "net/xfrm/xfrm_hash.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000097_hash = { ++ .next = NULL, ++ .name = "__xip_file_write", ++ .file = "mm/filemap_xip.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000098_hash = { ++ .next = NULL, ++ .name = 
"acpi_ex_allocate_name_string", ++ .file = "drivers/acpi/acpica/exnames.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000099_hash = { ++ .next = NULL, ++ .name = "acpi_os_allocate_zeroed", ++ .file = "include/acpi/platform/aclinux.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000100_hash = { ++ .next = NULL, ++ .name = "acpi_system_debug_proc_write", ++ .file = "drivers/acpi/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000101_hash = { ++ .next = NULL, ++ .name = "acpi_system_write_sleep", ++ .file = "drivers/acpi/proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000102_hash = { ++ .next = NULL, ++ .name = "acpi_system_write_wakeup_device", ++ .file = "drivers/acpi/proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000103_hash = { ++ .next = NULL, ++ .name = "acpi_ut_initialize_buffer", ++ .file = "drivers/acpi/acpica/utalloc.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000104_hash = { ++ .next = NULL, ++ .name = "alloc_arch_preferred_bootmem", ++ .file = "mm/bootmem.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000105_hash = { ++ .next = NULL, ++ .name = "alloc_netdev_mq", ++ .file = "include/linux/netdevice.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000106_hash = { ++ .next = NULL, ++ .name = "audit_init_entry", ++ .file = "kernel/auditfilter.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000107_hash = { ++ .next = NULL, ++ .name = "blk_register_region", ++ .file = "include/linux/genhd.h", ++ .param1 = 1, ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000109_hash = { ++ .next = NULL, ++ .name = "cdev_add", ++ .file = "include/linux/cdev.h", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000111_hash = { ++ .next = NULL, ++ .name = "cgroup_write_X64", ++ .file = "kernel/cgroup.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000112_hash = { ++ .next = NULL, ++ .name = "clear_refs_write", ++ .file = "fs/proc/task_mmu.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000113_hash = { ++ .next = NULL, ++ .name = "copy_to_user", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000114_hash = { ++ .next = NULL, ++ .name = "copy_vm86_regs_from_user", ++ .file = "arch/x86/kernel/vm86_32.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000115_hash = { ++ .next = NULL, ++ .name = "crypto_ahash_setkey", ++ .file = "include/crypto/hash.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000116_hash = { ++ .next = NULL, ++ .name = "crypto_alloc_instance2", ++ .file = "include/crypto/algapi.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000117_hash = { ++ .next = NULL, ++ .name = "crypto_shash_setkey", ++ .file = "include/crypto/hash.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000118_hash = { ++ .next = NULL, ++ .name = "csum_partial_copy_fromiovecend", ++ .file = "include/linux/socket.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000119_hash = { ++ .next = NULL, ++ .name = "ddebug_proc_write", ++ .file = "lib/dynamic_debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000120_hash = { ++ .next = NULL, ++ .name = "dev_set_alias", ++ .file = "include/linux/netdevice.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000121_hash = { ++ .next = NULL, ++ .name = "do_kimage_alloc", ++ .file = "kernel/kexec.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000122_hash = { ++ .next = NULL, ++ .name = "do_readv_writev", ++ .file = 
"fs/read_write.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000123_hash = { ++ .next = NULL, ++ .name = "fill_write_buffer", ++ .file = "fs/sysfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000124_hash = { ++ .next = NULL, ++ .name = "get_user_cpu_mask", ++ .file = "kernel/sched.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000125_hash = { ++ .next = NULL, ++ .name = "getxattr", ++ .file = "fs/xattr.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000126_hash = { ++ .next = NULL, ++ .name = "hugetlbfs_read_actor", ++ .file = "fs/hugetlbfs/inode.c", ++ .param2 = 1, ++ .param5 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000129_hash = { ++ .next = NULL, ++ .name = "mtrr_write", ++ .file = "arch/x86/kernel/cpu/mtrr/if.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000130_hash = { ++ .next = NULL, ++ .name = "neigh_hash_alloc", ++ .file = "net/core/neighbour.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000131_hash = { ++ .next = NULL, ++ .name = "nl_pid_hash_zalloc", ++ .file = "net/netlink/af_netlink.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000132_hash = { ++ .next = NULL, ++ .name = "oom_adjust_write", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000133_hash = { ++ .next = NULL, ++ .name = "pci_add_cap_save_buffer", ++ .file = "drivers/pci/pci.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000134_hash = { ++ .next = NULL, ++ .name = "platform_device_register_data", ++ .file = "include/linux/platform_device.h", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000135_hash = { ++ .next = NULL, ++ .name = "platform_device_register_simple", ++ .file = "include/linux/platform_device.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000136_hash = { ++ .next = NULL, ++ .name = "proc_coredump_filter_write", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000137_hash = { ++ .next = NULL, ++ .name = "proc_loginuid_write", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000138_hash = { ++ .next = NULL, ++ .name = "proc_pid_attr_write", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000139_hash = { ++ .next = NULL, ++ .name = "proc_write_node", ++ .file = "drivers/pnp/pnpbios/proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000140_hash = { ++ .next = NULL, ++ .name = "sched_feat_write", ++ .file = "kernel/sched.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000141_hash = { ++ .next = NULL, ++ .name = "security_context_to_sid", ++ .file = "security/selinux/ss/services.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000142_hash = { ++ .next = NULL, ++ .name = "security_context_to_sid_default", ++ .file = "security/selinux/ss/services.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000143_hash = { ++ .next = NULL, ++ .name = "security_context_to_sid_force", ++ .file = "security/selinux/ss/services.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000144_hash = { ++ .next = NULL, ++ .name = "sel_commit_bools_write", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000145_hash = { ++ .next = NULL, ++ .name = "sel_write_access", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000146_hash = { ++ .next = NULL, ++ .name = "sel_write_avc_cache_threshold", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; 
++ ++struct size_overflow_hash _000147_hash = { ++ .next = NULL, ++ .name = "sel_write_bool", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000148_hash = { ++ .next = NULL, ++ .name = "sel_write_checkreqprot", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000149_hash = { ++ .next = NULL, ++ .name = "sel_write_create", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000150_hash = { ++ .next = NULL, ++ .name = "sel_write_disable", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000151_hash = { ++ .next = NULL, ++ .name = "sel_write_enforce", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000152_hash = { ++ .next = NULL, ++ .name = "sel_write_member", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000153_hash = { ++ .next = NULL, ++ .name = "sel_write_relabel", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000154_hash = { ++ .next = NULL, ++ .name = "sel_write_user", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000155_hash = { ++ .next = NULL, ++ .name = "setkey", ++ .file = "crypto/cipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000156_hash = { ++ .next = NULL, ++ .name = "setkey", ++ .file = "crypto/ablkcipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000157_hash = { ++ .next = NULL, ++ .name = "setkey", ++ .file = "crypto/aead.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000158_hash = { ++ .next = NULL, ++ .name = "setkey", ++ .file = "crypto/blkcipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000159_hash = { ++ .next = NULL, ++ .name = "simple_transaction_get", ++ .file = "include/linux/fs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000160_hash = { ++ .next = NULL, ++ .name = "smk_write_ambient", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000161_hash = { ++ .next = NULL, ++ .name = "smk_write_cipso", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000162_hash = { ++ .next = NULL, ++ .name = "smk_write_direct", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000163_hash = { ++ .next = NULL, ++ .name = "smk_write_doi", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000164_hash = { ++ .next = NULL, ++ .name = "smk_write_load", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000165_hash = { ++ .next = &_000069_hash, ++ .name = "smk_write_logging", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000166_hash = { ++ .next = NULL, ++ .name = "smk_write_netlbladdr", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000167_hash = { ++ .next = NULL, ++ .name = "smk_write_onlycap", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000168_hash = { ++ .next = NULL, ++ .name = "spi_alloc_master", ++ .file = "include/linux/spi/spi.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000169_hash = { ++ .next = NULL, ++ .name = "sysctl_data", ++ .file = "include/linux/sysctl.h", ++ .param5 = 1, 
++}; ++ ++struct size_overflow_hash _000170_hash = { ++ .next = NULL, ++ .name = "sysctl_ipc_data", ++ .file = "ipc/ipc_sysctl.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000171_hash = { ++ .next = NULL, ++ .name = "sysctl_string", ++ .file = "include/linux/sysctl.h", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000172_hash = { ++ .next = NULL, ++ .name = "sys_flistxattr", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000173_hash = { ++ .next = NULL, ++ .name = "sys_fsetxattr", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000174_hash = { ++ .next = NULL, ++ .name = "sys_init_module", ++ .file = "include/linux/syscalls.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000175_hash = { ++ .next = NULL, ++ .name = "sys_keyctl", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000176_hash = { ++ .next = NULL, ++ .name = "sys_listxattr", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000177_hash = { ++ .next = NULL, ++ .name = "sys_llistxattr", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000178_hash = { ++ .next = NULL, ++ .name = "sys_lsetxattr", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000179_hash = { ++ .next = NULL, ++ .name = "sys_semop", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000180_hash = { ++ .next = NULL, ++ .name = "sys_setxattr", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000181_hash = { ++ .next = NULL, ++ .name = "tomoyo_alloc", ++ .file = "security/tomoyo/realpath.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000182_hash = { ++ .next = NULL, ++ .name = "tpm_write", ++ .file = "drivers/char/tpm/tpm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000183_hash = { ++ .next = NULL, ++ .name = "tty_write", ++ .file = "drivers/char/tty_io.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000184_hash = { ++ .next = NULL, ++ .name = "vc_resize", ++ .file = "include/linux/vt_kern.h", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000186_hash = { ++ .next = NULL, ++ .name = "vcs_write", ++ .file = "drivers/char/vc_screen.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000187_hash = { ++ .next = NULL, ++ .name = "write_mem", ++ .file = "drivers/char/mem.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000188_hash = { ++ .next = NULL, ++ .name = "acpi_ds_build_internal_package_obj", ++ .file = "drivers/acpi/acpica/dsobject.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000189_hash = { ++ .next = NULL, ++ .name = "acpi_system_read_event", ++ .file = "drivers/acpi/event.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000190_hash = { ++ .next = NULL, ++ .name = "acpi_ut_create_buffer_object", ++ .file = "drivers/acpi/acpica/utobject.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000191_hash = { ++ .next = NULL, ++ .name = "acpi_ut_create_package_object", ++ .file = "drivers/acpi/acpica/utobject.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000192_hash = { ++ .next = NULL, ++ .name = "acpi_ut_create_string_object", ++ .file = "drivers/acpi/acpica/utobject.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000193_hash = { ++ .next = NULL, ++ .name = "alloc_etherdev_mq", ++ .file = 
"include/linux/etherdevice.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000194_hash = { ++ .next = NULL, ++ .name = "async_setkey", ++ .file = "crypto/blkcipher.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000195_hash = { ++ .next = NULL, ++ .name = "copy_oldmem_page", ++ .file = "include/linux/crash_dump.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000196_hash = { ++ .next = NULL, ++ .name = "do_sigpending", ++ .file = "include/linux/signal.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000197_hash = { ++ .next = NULL, ++ .name = "do_sysctl_strategy", ++ .file = "kernel/sysctl.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000199_hash = { ++ .next = NULL, ++ .name = "keyctl_describe_key", ++ .file = "security/keys/keyctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000200_hash = { ++ .next = NULL, ++ .name = "keyctl_get_security", ++ .file = "security/keys/keyctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000201_hash = { ++ .next = NULL, ++ .name = "keyring_read", ++ .file = "security/keys/keyring.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000202_hash = { ++ .next = NULL, ++ .name = "kimage_crash_alloc", ++ .file = "kernel/kexec.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000203_hash = { ++ .next = NULL, ++ .name = "kimage_normal_alloc", ++ .file = "kernel/kexec.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000204_hash = { ++ .next = NULL, ++ .name = "neigh_hash_grow", ++ .file = "net/core/neighbour.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000205_hash = { ++ .next = NULL, ++ .name = "__proc_file_read", ++ .file = "fs/proc/generic.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000206_hash = { ++ .next = NULL, ++ .name = "read_mem", ++ .file = "drivers/char/mem.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000207_hash = { ++ .next = NULL, ++ .name = "read_profile", ++ .file = "kernel/profile.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000208_hash = { ++ .next = NULL, ++ .name = "read_vmcore", ++ .file = "fs/proc/vmcore.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000209_hash = { ++ .next = NULL, ++ .name = "redirected_tty_write", ++ .file = "drivers/char/tty_io.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000210_hash = { ++ .next = NULL, ++ .name = "__register_chrdev", ++ .file = "include/linux/fs.h", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000212_hash = { ++ .next = NULL, ++ .name = "request_key_auth_read", ++ .file = "security/keys/request_key_auth.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000213_hash = { ++ .next = NULL, ++ .name = "selinux_transaction_write", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000214_hash = { ++ .next = NULL, ++ .name = "shash_async_setkey", ++ .file = "crypto/shash.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000215_hash = { ++ .next = NULL, ++ .name = "shash_compat_setkey", ++ .file = "crypto/shash.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000216_hash = { ++ .next = NULL, ++ .name = "simple_read_from_buffer", ++ .file = "include/linux/fs.h", ++ .param2 = 1, ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000218_hash = { ++ .next = NULL, ++ .name = "store_ifalias", ++ .file = "net/core/net-sysfs.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000219_hash = { ++ .next = NULL, ++ .name = "strategy_allowed_congestion_control", ++ .file = 
"net/ipv4/sysctl_net_ipv4.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000220_hash = { ++ .next = NULL, ++ .name = "subbuf_read_actor", ++ .file = "kernel/relay.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000221_hash = { ++ .next = NULL, ++ .name = "sysctl_ipc_registered_data", ++ .file = "ipc/ipc_sysctl.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000222_hash = { ++ .next = NULL, ++ .name = "sysctl_tcp_congestion_control", ++ .file = "net/ipv4/sysctl_net_ipv4.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000223_hash = { ++ .next = NULL, ++ .name = "sysctl_uts_string", ++ .file = "kernel/utsname_sysctl.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000224_hash = { ++ .next = NULL, ++ .name = "sys_fgetxattr", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000225_hash = { ++ .next = NULL, ++ .name = "sysfs_write_file", ++ .file = "fs/sysfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000226_hash = { ++ .next = NULL, ++ .name = "sys_getxattr", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000227_hash = { ++ .next = NULL, ++ .name = "sys_lgetxattr", ++ .file = "include/linux/syscalls.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000228_hash = { ++ .next = NULL, ++ .name = "sys_sched_getaffinity", ++ .file = "include/linux/syscalls.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000229_hash = { ++ .next = NULL, ++ .name = "sys_sched_setaffinity", ++ .file = "include/linux/syscalls.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000230_hash = { ++ .next = NULL, ++ .name = "tpm_read", ++ .file = "drivers/char/tpm/tpm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000231_hash = { ++ .next = NULL, ++ .name = "user_read", ++ .file = "include/keys/user-type.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000232_hash = { ++ .next = NULL, ++ .name = "vcs_read", ++ .file = "drivers/char/vc_screen.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000233_hash = { ++ .next = NULL, ++ .name = "vfs_readv", ++ .file = "include/linux/fs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000234_hash = { ++ .next = NULL, ++ .name = "vfs_writev", ++ .file = "include/linux/fs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000235_hash = { ++ .next = NULL, ++ .name = "vga_arb_read", ++ .file = "drivers/gpu/vga/vgaarb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000236_hash = { ++ .next = NULL, ++ .name = "acpi_system_read_dsdt", ++ .file = "drivers/acpi/system.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000237_hash = { ++ .next = NULL, ++ .name = "acpi_system_read_fadt", ++ .file = "drivers/acpi/system.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000238_hash = { ++ .next = NULL, ++ .name = "cgroup_read_s64", ++ .file = "kernel/cgroup.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000239_hash = { ++ .next = NULL, ++ .name = "cgroup_read_u64", ++ .file = "kernel/cgroup.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000240_hash = { ++ .next = NULL, ++ .name = "cpuset_common_file_read", ++ .file = "kernel/cpuset.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000241_hash = { ++ .next = NULL, ++ .name = "filter_read", ++ .file = "lib/dma-debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000242_hash = { ++ .next = NULL, ++ .name = "ima_show_htable_value", ++ .file = "security/integrity/ima/ima_fs.c", ++ .param2 
= 1, ++}; ++ ++struct size_overflow_hash _000243_hash = { ++ .next = NULL, ++ .name = "kernel_readv", ++ .file = "fs/splice.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000244_hash = { ++ .next = NULL, ++ .name = "mqueue_read_file", ++ .file = "ipc/mqueue.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000245_hash = { ++ .next = NULL, ++ .name = "oom_adjust_read", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000246_hash = { ++ .next = NULL, ++ .name = "parse_table", ++ .file = "kernel/sysctl.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000247_hash = { ++ .next = NULL, ++ .name = "proc_coredump_filter_read", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000248_hash = { ++ .next = NULL, ++ .name = "proc_fdinfo_read", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000249_hash = { ++ .next = NULL, ++ .name = "proc_file_read", ++ .file = "fs/proc/generic.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000250_hash = { ++ .next = NULL, ++ .name = "proc_info_read", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000251_hash = { ++ .next = NULL, ++ .name = "proc_loginuid_read", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000252_hash = { ++ .next = NULL, ++ .name = "proc_pid_attr_read", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000253_hash = { ++ .next = NULL, ++ .name = "proc_sessionid_read", ++ .file = "fs/proc/base.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000254_hash = { ++ .next = NULL, ++ .name = "read_enabled_file_bool", ++ .file = "kernel/kprobes.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000255_hash = { ++ .next = NULL, ++ .name = "read_file_blob", ++ .file = "fs/debugfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000256_hash = { ++ .next = NULL, ++ .name = "read_file_bool", ++ .file = "fs/debugfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000257_hash = { ++ .next = NULL, ++ .name = "read_from_oldmem", ++ .file = "fs/proc/vmcore.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000258_hash = { ++ .next = NULL, ++ .name = "read_oldmem", ++ .file = "drivers/char/mem.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000259_hash = { ++ .next = NULL, ++ .name = "res_counter_read", ++ .file = "include/linux/res_counter.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000260_hash = { ++ .next = NULL, ++ .name = "sel_read_avc_cache_threshold", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000261_hash = { ++ .next = NULL, ++ .name = "sel_read_avc_hash_stats", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000262_hash = { ++ .next = NULL, ++ .name = "sel_read_bool", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000263_hash = { ++ .next = NULL, ++ .name = "sel_read_checkreqprot", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000264_hash = { ++ .next = NULL, ++ .name = "sel_read_class", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000265_hash = { ++ .next = NULL, ++ .name = "sel_read_enforce", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000266_hash = { ++ .next = 
NULL, ++ .name = "sel_read_handle_unknown", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000267_hash = { ++ .next = NULL, ++ .name = "sel_read_initcon", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000268_hash = { ++ .next = NULL, ++ .name = "sel_read_mls", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000269_hash = { ++ .next = NULL, ++ .name = "sel_read_perm", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000270_hash = { ++ .next = NULL, ++ .name = "sel_read_policycap", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000271_hash = { ++ .next = NULL, ++ .name = "sel_read_policyvers", ++ .file = "security/selinux/selinuxfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000272_hash = { ++ .next = NULL, ++ .name = "simple_attr_read", ++ .file = "include/linux/fs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000273_hash = { ++ .next = NULL, ++ .name = "simple_transaction_read", ++ .file = "include/linux/fs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000274_hash = { ++ .next = NULL, ++ .name = "smk_read_ambient", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000275_hash = { ++ .next = NULL, ++ .name = "smk_read_direct", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000276_hash = { ++ .next = NULL, ++ .name = "smk_read_doi", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000277_hash = { ++ .next = NULL, ++ .name = "smk_read_logging", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000278_hash = { ++ .next = NULL, ++ .name = "smk_read_onlycap", ++ .file = "security/smack/smackfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000279_hash = { ++ .next = NULL, ++ .name = "sysfs_read_file", ++ .file = "fs/sysfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000280_hash = { ++ .next = NULL, ++ .name = "sys_kexec_load", ++ .file = "include/linux/syscalls.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000281_hash = { ++ .next = NULL, ++ .name = "sys_preadv", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000282_hash = { ++ .next = NULL, ++ .name = "sys_pwritev", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000283_hash = { ++ .next = NULL, ++ .name = "sys_readv", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000284_hash = { ++ .next = NULL, ++ .name = "sys_rt_sigpending", ++ .file = "include/linux/syscalls.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000285_hash = { ++ .next = NULL, ++ .name = "sys_writev", ++ .file = "include/linux/syscalls.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000286_hash = { ++ .next = NULL, ++ .name = "do_sysctl", ++ .file = "include/linux/sysctl.h", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000287_hash = { ++ .next = NULL, ++ .name = "ima_show_htable_violations", ++ .file = "security/integrity/ima/ima_fs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000288_hash = { ++ .next = NULL, ++ .name = "ima_show_measurements_count", ++ .file = "security/integrity/ima/ima_fs.c", ++ .param3 = 1, ++}; ++ ++struct 
size_overflow_hash _000289_hash = { ++ .next = NULL, ++ .name = "compat_do_readv_writev", ++ .file = "fs/compat.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000290_hash = { ++ .next = NULL, ++ .name = "compat_sys_kexec_load", ++ .file = "include/linux/kexec.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000291_hash = { ++ .next = NULL, ++ .name = "compat_sys_semtimedop", ++ .file = "include/linux/compat.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000292_hash = { ++ .next = NULL, ++ .name = "__copy_from_user", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000293_hash = { ++ .next = NULL, ++ .name = "__copy_from_user_nocache", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000294_hash = { ++ .next = NULL, ++ .name = "__copy_in_user", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000295_hash = { ++ .next = NULL, ++ .name = "copy_in_user", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000296_hash = { ++ .next = NULL, ++ .name = "__copy_to_user", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000297_hash = { ++ .next = NULL, ++ .name = "__copy_to_user_inatomic", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000298_hash = { ++ .next = NULL, ++ .name = "sys32_rt_sigpending", ++ .file = "arch/x86/include/asm/sys_ia32.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000299_hash = { ++ .next = NULL, ++ .name = "compat_readv", ++ .file = "fs/compat.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000300_hash = { ++ .next = NULL, ++ .name = "compat_writev", ++ .file = "fs/compat.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000301_hash = { ++ .next = NULL, ++ .name = "compat_sys_preadv", ++ .file = "include/linux/compat.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000302_hash = { ++ .next = NULL, ++ .name = "compat_sys_pwritev", ++ .file = "include/linux/compat.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000303_hash = { ++ .next = NULL, ++ .name = "compat_sys_readv", ++ .file = "include/linux/compat.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000304_hash = { ++ .next = NULL, ++ .name = "compat_sys_writev", ++ .file = "include/linux/compat.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000305_hash = { ++ .next = NULL, ++ .name = "acpi_battery_write_alarm", ++ .file = "drivers/acpi/battery.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000306_hash = { ++ .next = NULL, ++ .name = "acpi_battery_write_alarm", ++ .file = "drivers/acpi/sbs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000307_hash = { ++ .next = NULL, ++ .name = "acpi_fan_write_state", ++ .file = "drivers/acpi/fan.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000308_hash = { ++ .next = NULL, ++ .name = "acpi_processor_write_limit", ++ .file = "drivers/acpi/processor_thermal.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000309_hash = { ++ .next = NULL, ++ .name = "acpi_processor_write_throttling", ++ .file = "drivers/acpi/processor_throttling.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000310_hash = { ++ .next = NULL, ++ .name = "acpi_thermal_write_cooling_mode", ++ .file = "drivers/acpi/thermal.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash 
_000311_hash = { ++ .next = NULL, ++ .name = "acpi_thermal_write_polling", ++ .file = "drivers/acpi/thermal.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000312_hash = { ++ .next = NULL, ++ .name = "acpi_video_bus_write_DOS", ++ .file = "drivers/acpi/video.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000313_hash = { ++ .next = NULL, ++ .name = "acpi_video_bus_write_POST", ++ .file = "drivers/acpi/video.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000314_hash = { ++ .next = NULL, ++ .name = "acpi_video_device_write_brightness", ++ .file = "drivers/acpi/video.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000315_hash = { ++ .next = NULL, ++ .name = "acpi_video_device_write_state", ++ .file = "drivers/acpi/video.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000316_hash = { ++ .next = NULL, ++ .name = "addtgt", ++ .file = "drivers/block/aoe/aoecmd.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000317_hash = { ++ .next = NULL, ++ .name = "adu_read", ++ .file = "drivers/usb/misc/adutux.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000318_hash = { ++ .next = NULL, ++ .name = "adu_write", ++ .file = "drivers/usb/misc/adutux.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000319_hash = { ++ .next = NULL, ++ .name = "aem_read_sensor", ++ .file = "drivers/hwmon/ibmaem.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000320_hash = { ++ .next = NULL, ++ .name = "aer_inject_write", ++ .file = "drivers/pci/pcie/aer/aer_inject.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000321_hash = { ++ .next = NULL, ++ .name = "aes_decrypt_fail_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000322_hash = { ++ .next = NULL, ++ .name = "aes_decrypt_fail_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000323_hash = { ++ .next = NULL, ++ .name = "aes_decrypt_interrupt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000324_hash = { ++ .next = NULL, ++ .name = "aes_decrypt_interrupt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000325_hash = { ++ .next = NULL, ++ .name = "aes_decrypt_packets_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000326_hash = { ++ .next = NULL, ++ .name = "aes_decrypt_packets_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000327_hash = { ++ .next = NULL, ++ .name = "aes_encrypt_fail_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000328_hash = { ++ .next = NULL, ++ .name = "aes_encrypt_fail_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000329_hash = { ++ .next = NULL, ++ .name = "aes_encrypt_interrupt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000330_hash = { ++ .next = NULL, ++ .name = "aes_encrypt_interrupt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000331_hash = { ++ .next = NULL, ++ .name = "aes_encrypt_packets_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", 
++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000332_hash = { ++ .next = NULL, ++ .name = "aes_encrypt_packets_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000333_hash = { ++ .next = NULL, ++ .name = "afs_alloc_flat_call", ++ .file = "fs/afs/rxrpc.c", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000335_hash = { ++ .next = NULL, ++ .name = "afs_proc_cells_write", ++ .file = "fs/afs/proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000336_hash = { ++ .next = NULL, ++ .name = "afs_proc_rootcell_write", ++ .file = "fs/afs/proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000337_hash = { ++ .next = NULL, ++ .name = "agp_3_5_isochronous_node_enable", ++ .file = "drivers/char/agp/isoch.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000338_hash = { ++ .next = NULL, ++ .name = "agp_alloc_page_array", ++ .file = "drivers/char/agp/generic.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000339_hash = { ++ .next = NULL, ++ .name = "alloc_context", ++ .file = "drivers/md/dm-raid1.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000340_hash = { ++ .next = NULL, ++ .name = "alloc_context", ++ .file = "drivers/md/dm-stripe.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000341_hash = { ++ .next = NULL, ++ .name = "alloc_ep_req", ++ .file = "drivers/usb/gadget/gmidi.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000342_hash = { ++ .next = NULL, ++ .name = "alloc_ring", ++ .file = "drivers/net/cxgb3/sge.c", ++ .param2 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000344_hash = { ++ .next = NULL, ++ .name = "alloc_ringbuf", ++ .file = "drivers/usb/serial/spcp8x5.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000345_hash = { ++ .next = NULL, ++ .name = "alloc_ts_config", ++ .file = "include/linux/textsearch.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000346_hash = { ++ .next = NULL, ++ .name = "aoedev_flush", ++ .file = "drivers/block/aoe/aoedev.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000347_hash = { ++ .next = NULL, ++ .name = "arcfb_write", ++ .file = "drivers/video/arcfb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000348_hash = { ++ .next = NULL, ++ .name = "arm_read", ++ .file = "drivers/ieee1394/raw1394.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000349_hash = { ++ .next = NULL, ++ .name = "arm_write", ++ .file = "drivers/ieee1394/raw1394.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000350_hash = { ++ .next = NULL, ++ .name = "asd_store_update_bios", ++ .file = "drivers/scsi/aic94xx/aic94xx_init.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000351_hash = { ++ .next = NULL, ++ .name = "asix_read_cmd", ++ .file = "drivers/net/usb/asix.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000352_hash = { ++ .next = NULL, ++ .name = "asix_write_cmd", ++ .file = "drivers/net/usb/asix.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000353_hash = { ++ .next = NULL, ++ .name = "asn1_octets_decode", ++ .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000354_hash = { ++ .next = NULL, ++ .name = "asn1_oid_decode", ++ .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000355_hash = { ++ .next = NULL, ++ .name = "asn1_oid_decode", ++ .file = "fs/cifs/asn1.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000356_hash = { ++ 
.next = NULL, ++ .name = "atm_get_addr", ++ .file = "net/atm/addr.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000357_hash = { ++ .next = NULL, ++ .name = "attach_hdlc_protocol", ++ .file = "include/linux/hdlc.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000358_hash = { ++ .next = NULL, ++ .name = "av7110_vbi_write", ++ .file = "drivers/media/dvb/ttpci/av7110_v4l.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000359_hash = { ++ .next = NULL, ++ .name = "ax25_setsockopt", ++ .file = "net/ax25/af_ax25.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000360_hash = { ++ .next = NULL, ++ .name = "b43_debugfs_read", ++ .file = "drivers/net/wireless/b43/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000361_hash = { ++ .next = NULL, ++ .name = "b43_debugfs_write", ++ .file = "drivers/net/wireless/b43/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000362_hash = { ++ .next = NULL, ++ .name = "b43legacy_debugfs_read", ++ .file = "drivers/net/wireless/b43legacy/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000363_hash = { ++ .next = NULL, ++ .name = "b43legacy_debugfs_write", ++ .file = "drivers/net/wireless/b43legacy/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000364_hash = { ++ .next = NULL, ++ .name = "bits_to_user", ++ .file = "drivers/input/evdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000365_hash = { ++ .next = NULL, ++ .name = "bm_entry_read", ++ .file = "fs/binfmt_misc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000366_hash = { ++ .next = NULL, ++ .name = "bm_status_read", ++ .file = "fs/binfmt_misc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000367_hash = { ++ .next = NULL, ++ .name = "broadsheetfb_write", ++ .file = "drivers/video/broadsheetfb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000368_hash = { ++ .next = NULL, ++ .name = "btmrvl_curpsmode_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000369_hash = { ++ .next = NULL, ++ .name = "btmrvl_gpiogap_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000370_hash = { ++ .next = NULL, ++ .name = "btmrvl_gpiogap_write", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000371_hash = { ++ .next = NULL, ++ .name = "btmrvl_hscfgcmd_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000372_hash = { ++ .next = NULL, ++ .name = "btmrvl_hscfgcmd_write", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000373_hash = { ++ .next = &_000007_hash, ++ .name = "btmrvl_hscmd_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000374_hash = { ++ .next = NULL, ++ .name = "btmrvl_hscmd_write", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000375_hash = { ++ .next = NULL, ++ .name = "btmrvl_hsmode_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000376_hash = { ++ .next = NULL, ++ .name = "btmrvl_hsmode_write", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000377_hash = { ++ .next = NULL, ++ .name = "btmrvl_hsstate_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, 
++}; ++ ++struct size_overflow_hash _000378_hash = { ++ .next = NULL, ++ .name = "btmrvl_pscmd_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000379_hash = { ++ .next = NULL, ++ .name = "btmrvl_pscmd_write", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000380_hash = { ++ .next = NULL, ++ .name = "btmrvl_psmode_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000381_hash = { ++ .next = NULL, ++ .name = "btmrvl_psmode_write", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000382_hash = { ++ .next = NULL, ++ .name = "btmrvl_psstate_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000383_hash = { ++ .next = NULL, ++ .name = "btmrvl_txdnldready_read", ++ .file = "drivers/bluetooth/btmrvl_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000384_hash = { ++ .next = NULL, ++ .name = "cache_do_downcall", ++ .file = "net/sunrpc/cache.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000385_hash = { ++ .next = NULL, ++ .name = "cachefiles_daemon_write", ++ .file = "fs/cachefiles/daemon.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000386_hash = { ++ .next = NULL, ++ .name = "cache_read", ++ .file = "net/sunrpc/cache.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000387_hash = { ++ .next = NULL, ++ .name = "cafe_deliver_buffer", ++ .file = "drivers/media/video/cafe_ccic.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000388_hash = { ++ .next = NULL, ++ .name = "cciss_proc_write", ++ .file = "drivers/block/cciss.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000389_hash = { ++ .next = NULL, ++ .name = "cfg80211_connect_result", ++ .file = "include/net/cfg80211.h", ++ .param4 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000391_hash = { ++ .next = NULL, ++ .name = "cfg80211_disconnected", ++ .file = "include/net/cfg80211.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000392_hash = { ++ .next = NULL, ++ .name = "cfg80211_inform_bss", ++ .file = "include/net/cfg80211.h", ++ .param8 = 1, ++}; ++ ++struct size_overflow_hash _000393_hash = { ++ .next = NULL, ++ .name = "cfg80211_inform_bss_frame", ++ .file = "include/net/cfg80211.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000394_hash = { ++ .next = NULL, ++ .name = "cfg80211_roamed", ++ .file = "include/net/cfg80211.h", ++ .param4 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000396_hash = { ++ .next = NULL, ++ .name = "cfi_read_pri", ++ .file = "include/linux/mtd/cfi.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000397_hash = { ++ .next = NULL, ++ .name = "cifs_security_flags_proc_write", ++ .file = "fs/cifs/cifs_debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000398_hash = { ++ .next = NULL, ++ .name = "CIFSSMBWrite", ++ .file = "fs/cifs/cifssmb.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000399_hash = { ++ .next = NULL, ++ .name = "cifs_spnego_key_instantiate", ++ .file = "fs/cifs/cifs_spnego.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000400_hash = { ++ .next = NULL, ++ .name = "ci_ll_write", ++ .file = "drivers/media/dvb/ttpci/av7110_ca.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000401_hash = { ++ .next = NULL, ++ .name = "clusterip_proc_write", ++ .file = 
"net/ipv4/netfilter/ipt_CLUSTERIP.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000402_hash = { ++ .next = &_000075_hash, ++ .name = "cm4040_write", ++ .file = "drivers/char/pcmcia/cm4040_cs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000403_hash = { ++ .next = NULL, ++ .name = "cm_copy_private_data", ++ .file = "drivers/infiniband/core/cm.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000404_hash = { ++ .next = NULL, ++ .name = "cmm_write", ++ .file = "drivers/char/pcmcia/cm4000_cs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000405_hash = { ++ .next = NULL, ++ .name = "coda_psdev_read", ++ .file = "fs/coda/psdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000406_hash = { ++ .next = NULL, ++ .name = "coda_psdev_write", ++ .file = "fs/coda/psdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000407_hash = { ++ .next = NULL, ++ .name = "codec_reg_read_file", ++ .file = "sound/soc/soc-core.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000408_hash = { ++ .next = NULL, ++ .name = "command_file_write", ++ .file = "drivers/misc/ibmasm/ibmasmfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000409_hash = { ++ .next = NULL, ++ .name = "command_write", ++ .file = "drivers/uwb/uwb-debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000410_hash = { ++ .next = NULL, ++ .name = "concat_writev", ++ .file = "drivers/mtd/mtdconcat.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000411_hash = { ++ .next = NULL, ++ .name = "configfs_read_file", ++ .file = "fs/configfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000412_hash = { ++ .next = NULL, ++ .name = "copy_entries_to_user", ++ .file = "net/ipv6/netfilter/ip6_tables.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000413_hash = { ++ .next = NULL, ++ .name = "copy_entries_to_user", ++ .file = "net/ipv4/netfilter/arp_tables.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000414_hash = { ++ .next = NULL, ++ .name = "copy_entries_to_user", ++ .file = "net/ipv4/netfilter/ip_tables.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000415_hash = { ++ .next = NULL, ++ .name = "__copy_from_user_inatomic_nocache", ++ .file = "arch/x86/include/asm/uaccess_32.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000416_hash = { ++ .next = NULL, ++ .name = "copy_from_user_toio", ++ .file = "include/sound/core.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000417_hash = { ++ .next = NULL, ++ .name = "copy_macs", ++ .file = "net/atm/mpc.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000418_hash = { ++ .next = NULL, ++ .name = "copy_to_user_fromio", ++ .file = "include/sound/core.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000419_hash = { ++ .next = NULL, ++ .name = "cosa_write", ++ .file = "drivers/net/wan/cosa.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000420_hash = { ++ .next = NULL, ++ .name = "create_attr_set", ++ .file = "drivers/platform/x86/thinkpad_acpi.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000421_hash = { ++ .next = NULL, ++ .name = "create_entry", ++ .file = "fs/binfmt_misc.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000422_hash = { ++ .next = NULL, ++ .name = "create_queues", ++ .file = "drivers/atm/ambassador.c", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000424_hash = { ++ .next = NULL, ++ .name = "cryptd_alloc_instance", ++ .file = "crypto/cryptd.c", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct 
size_overflow_hash _000426_hash = { ++ .next = NULL, ++ .name = "cryptd_hash_setkey", ++ .file = "crypto/cryptd.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000427_hash = { ++ .next = NULL, ++ .name = "crypto_authenc_setkey", ++ .file = "crypto/authenc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000428_hash = { ++ .next = NULL, ++ .name = "csr1212_new_leaf", ++ .file = "drivers/ieee1394/csr1212.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000429_hash = { ++ .next = NULL, ++ .name = "csr1212_rom_cache_malloc", ++ .file = "drivers/ieee1394/csr1212.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000430_hash = { ++ .next = NULL, ++ .name = "ctrl_out", ++ .file = "drivers/usb/misc/usbtest.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000431_hash = { ++ .next = NULL, ++ .name = "cx18_copy_buf_to_user", ++ .file = "drivers/media/video/cx18/cx18-fileops.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000432_hash = { ++ .next = NULL, ++ .name = "cx24116_writeregN", ++ .file = "drivers/media/dvb/frontends/cx24116.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000433_hash = { ++ .next = NULL, ++ .name = "cxgb3i_alloc_big_mem", ++ .file = "drivers/scsi/cxgb3i/cxgb3i_ddp.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000434_hash = { ++ .next = NULL, ++ .name = "cxgb3i_ddp_make_gl", ++ .file = "drivers/scsi/cxgb3i/cxgb3i_ddp.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000435_hash = { ++ .next = &_000140_hash, ++ .name = "cxgb_alloc_mem", ++ .file = "drivers/net/cxgb3/cxgb3_offload.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000436_hash = { ++ .next = NULL, ++ .name = "__cxio_init_resource_fifo", ++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000437_hash = { ++ .next = NULL, ++ .name = "cypress_buf_alloc", ++ .file = "drivers/usb/serial/cypress_m8.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000438_hash = { ++ .next = NULL, ++ .name = "dabusb_read", ++ .file = "drivers/media/video/dabusb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000439_hash = { ++ .next = NULL, ++ .name = "dac960_user_command_proc_write", ++ .file = "drivers/block/DAC960.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000440_hash = { ++ .next = NULL, ++ .name = "dapm_widget_power_read_file", ++ .file = "sound/soc/soc-dapm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000441_hash = { ++ .next = NULL, ++ .name = "dccp_feat_clone_sp_val", ++ .file = "net/dccp/feat.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000442_hash = { ++ .next = NULL, ++ .name = "dccp_setsockopt_ccid", ++ .file = "net/dccp/proto.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000443_hash = { ++ .next = NULL, ++ .name = "dccp_setsockopt_service", ++ .file = "net/dccp/proto.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000444_hash = { ++ .next = NULL, ++ .name = "debug_buffer_read", ++ .file = "fs/ocfs2/dlm/dlmdebug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000445_hash = { ++ .next = NULL, ++ .name = "debug_output", ++ .file = "drivers/usb/host/ohci-dbg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000446_hash = { ++ .next = NULL, ++ .name = "debug_output", ++ .file = "drivers/usb/host/ehci-dbg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000447_hash = { ++ .next = NULL, ++ .name = "dev_config", ++ .file = "drivers/usb/gadget/inode.c", ++ .param3 = 1, ++}; ++ ++struct 
size_overflow_hash _000448_hash = { ++ .next = NULL, ++ .name = "device_write", ++ .file = "fs/dlm/user.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000449_hash = { ++ .next = NULL, ++ .name = "dev_read", ++ .file = "drivers/media/video/gspca/gspca.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000450_hash = { ++ .next = NULL, ++ .name = "dispatch_proc_write", ++ .file = "drivers/platform/x86/thinkpad_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000451_hash = { ++ .next = NULL, ++ .name = "dispatch_write", ++ .file = "drivers/platform/x86/toshiba_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000452_hash = { ++ .next = NULL, ++ .name = "dlmfs_file_read", ++ .file = "fs/ocfs2/dlm/dlmfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000453_hash = { ++ .next = NULL, ++ .name = "dlmfs_file_write", ++ .file = "fs/ocfs2/dlm/dlmfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000454_hash = { ++ .next = NULL, ++ .name = "dma_rx_errors_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000455_hash = { ++ .next = NULL, ++ .name = "dma_rx_errors_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000456_hash = { ++ .next = NULL, ++ .name = "dma_rx_requested_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000457_hash = { ++ .next = NULL, ++ .name = "dma_rx_requested_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000458_hash = { ++ .next = NULL, ++ .name = "dma_tx_errors_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000459_hash = { ++ .next = NULL, ++ .name = "dma_tx_errors_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000460_hash = { ++ .next = NULL, ++ .name = "dma_tx_requested_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000461_hash = { ++ .next = NULL, ++ .name = "dma_tx_requested_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000462_hash = { ++ .next = NULL, ++ .name = "dm_read", ++ .file = "drivers/net/usb/dm9601.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000463_hash = { ++ .next = NULL, ++ .name = "dm_vcalloc", ++ .file = "include/linux/device-mapper.h", ++ .param1 = 1, ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000465_hash = { ++ .next = NULL, ++ .name = "dm_write", ++ .file = "drivers/net/usb/dm9601.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000466_hash = { ++ .next = NULL, ++ .name = "dn_def_dev_strategy", ++ .file = "net/decnet/sysctl_net_decnet.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000467_hash = { ++ .next = NULL, ++ .name = "__dn_setsockopt", ++ .file = "net/decnet/af_decnet.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000468_hash = { ++ .next = NULL, ++ .name = "dns_resolver_instantiate", ++ .file = "fs/cifs/dns_resolve.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000469_hash = { ++ .next = NULL, ++ .name = "dns_resolve_server_name_to_ip", ++ .file = "fs/cifs/dns_resolve.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000470_hash = { ++ .next = NULL, ++ .name = 
"do_add_counters", ++ .file = "net/ipv6/netfilter/ip6_tables.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000471_hash = { ++ .next = NULL, ++ .name = "do_add_counters", ++ .file = "net/ipv4/netfilter/ip_tables.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000472_hash = { ++ .next = NULL, ++ .name = "do_add_counters", ++ .file = "net/ipv4/netfilter/arp_tables.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000473_hash = { ++ .next = NULL, ++ .name = "do_ipv6_setsockopt", ++ .file = "net/ipv6/ipv6_sockglue.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000474_hash = { ++ .next = NULL, ++ .name = "do_ip_vs_set_ctl", ++ .file = "net/netfilter/ipvs/ip_vs_ctl.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000475_hash = { ++ .next = NULL, ++ .name = "__do_replace", ++ .file = "net/ipv6/netfilter/ip6_tables.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000476_hash = { ++ .next = NULL, ++ .name = "__do_replace", ++ .file = "net/ipv4/netfilter/ip_tables.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000477_hash = { ++ .next = NULL, ++ .name = "__do_replace", ++ .file = "net/ipv4/netfilter/arp_tables.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000478_hash = { ++ .next = NULL, ++ .name = "do_sync", ++ .file = "fs/gfs2/quota.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000479_hash = { ++ .next = NULL, ++ .name = "drm_calloc_large", ++ .file = "include/drm/drmP.h", ++ .param1 = 1, ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000481_hash = { ++ .next = NULL, ++ .name = "drm_sman_init", ++ .file = "drivers/gpu/drm/drm_sman.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000482_hash = { ++ .next = NULL, ++ .name = "drm_vmalloc_dma", ++ .file = "drivers/gpu/drm/drm_scatter.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000483_hash = { ++ .next = NULL, ++ .name = "ds_ioctl", ++ .file = "drivers/pcmcia/pcmcia_ioctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000484_hash = { ++ .next = NULL, ++ .name = "dsp_write", ++ .file = "sound/oss/msnd_pinnacle.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000485_hash = { ++ .next = NULL, ++ .name = "dv1394_read", ++ .file = "drivers/ieee1394/dv1394.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000486_hash = { ++ .next = NULL, ++ .name = "dv1394_write", ++ .file = "drivers/ieee1394/dv1394.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000487_hash = { ++ .next = NULL, ++ .name = "dvb_aplay", ++ .file = "drivers/media/dvb/ttpci/av7110_av.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000488_hash = { ++ .next = NULL, ++ .name = "dvb_ca_en50221_io_write", ++ .file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000489_hash = { ++ .next = NULL, ++ .name = "dvb_dmxdev_set_buffer_size", ++ .file = "drivers/media/dvb/dvb-core/dmxdev.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000490_hash = { ++ .next = NULL, ++ .name = "dvbdmx_write", ++ .file = "drivers/media/dvb/dvb-core/dvb_demux.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000491_hash = { ++ .next = NULL, ++ .name = "dvb_dvr_set_buffer_size", ++ .file = "drivers/media/dvb/dvb-core/dmxdev.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000492_hash = { ++ .next = NULL, ++ .name = "dvb_play", ++ .file = "drivers/media/dvb/ttpci/av7110_av.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000493_hash = { ++ .next = NULL, ++ .name = 
"dvb_ringbuffer_pkt_read_user", ++ .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000494_hash = { ++ .next = NULL, ++ .name = "dvb_ringbuffer_read_user", ++ .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000495_hash = { ++ .next = NULL, ++ .name = "econet_sendmsg", ++ .file = "net/econet/af_econet.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000496_hash = { ++ .next = NULL, ++ .name = "ecryptfs_copy_filename", ++ .file = "fs/ecryptfs/crypto.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000497_hash = { ++ .next = NULL, ++ .name = "ecryptfs_miscdev_write", ++ .file = "fs/ecryptfs/miscdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000498_hash = { ++ .next = NULL, ++ .name = "ecryptfs_send_miscdev", ++ .file = "fs/ecryptfs/miscdev.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000499_hash = { ++ .next = NULL, ++ .name = "edac_device_alloc_ctl_info", ++ .file = "drivers/edac/edac_device.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000500_hash = { ++ .next = NULL, ++ .name = "edac_mc_alloc", ++ .file = "drivers/edac/edac_mc.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000501_hash = { ++ .next = NULL, ++ .name = "edac_pci_alloc_ctl_info", ++ .file = "drivers/edac/edac_pci.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000502_hash = { ++ .next = &_000199_hash, ++ .name = "edge_buf_alloc", ++ .file = "drivers/usb/serial/io_ti.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000503_hash = { ++ .next = NULL, ++ .name = "efivar_create_sysfs_entry", ++ .file = "drivers/firmware/efivars.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000504_hash = { ++ .next = NULL, ++ .name = "efx_tsoh_heap_alloc", ++ .file = "drivers/net/sfc/tx.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000505_hash = { ++ .next = NULL, ++ .name = "ep0_write", ++ .file = "drivers/usb/gadget/inode.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000506_hash = { ++ .next = NULL, ++ .name = "ep_read", ++ .file = "drivers/usb/gadget/inode.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000507_hash = { ++ .next = NULL, ++ .name = "ep_write", ++ .file = "drivers/usb/gadget/inode.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000508_hash = { ++ .next = NULL, ++ .name = "et61x251_read", ++ .file = "drivers/media/video/et61x251/et61x251_core.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000509_hash = { ++ .next = NULL, ++ .name = "event_calibration_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000510_hash = { ++ .next = NULL, ++ .name = "event_calibration_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000511_hash = { ++ .next = NULL, ++ .name = "event_heart_beat_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000512_hash = { ++ .next = NULL, ++ .name = "event_heart_beat_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000513_hash = { ++ .next = NULL, ++ .name = "event_oom_late_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000514_hash = { ++ .next = NULL, ++ .name = "event_oom_late_read", ++ .file = 
"drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000515_hash = { ++ .next = NULL, ++ .name = "event_phy_transmit_error_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000516_hash = { ++ .next = NULL, ++ .name = "event_phy_transmit_error_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000517_hash = { ++ .next = NULL, ++ .name = "event_rx_mem_empty_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000518_hash = { ++ .next = NULL, ++ .name = "event_rx_mem_empty_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000519_hash = { ++ .next = NULL, ++ .name = "event_rx_mismatch_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000520_hash = { ++ .next = NULL, ++ .name = "event_rx_mismatch_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000521_hash = { ++ .next = NULL, ++ .name = "event_rx_pool_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000522_hash = { ++ .next = NULL, ++ .name = "event_rx_pool_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000523_hash = { ++ .next = NULL, ++ .name = "event_tx_stuck_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000524_hash = { ++ .next = NULL, ++ .name = "event_tx_stuck_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000525_hash = { ++ .next = NULL, ++ .name = "excessive_retries_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000526_hash = { ++ .next = NULL, ++ .name = "excessive_retries_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000527_hash = { ++ .next = NULL, ++ .name = "fb_read", ++ .file = "drivers/video/fbmem.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000528_hash = { ++ .next = NULL, ++ .name = "fb_sys_read", ++ .file = "include/linux/fb.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000529_hash = { ++ .next = NULL, ++ .name = "fb_sys_write", ++ .file = "include/linux/fb.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000530_hash = { ++ .next = NULL, ++ .name = "fb_write", ++ .file = "drivers/video/fbmem.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000531_hash = { ++ .next = NULL, ++ .name = "fcp_request", ++ .file = "drivers/ieee1394/raw1394.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000532_hash = { ++ .next = NULL, ++ .name = "fd_copyin", ++ .file = "drivers/block/floppy.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000533_hash = { ++ .next = NULL, ++ .name = "fd_copyout", ++ .file = "drivers/block/floppy.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000534_hash = { ++ .next = NULL, ++ .name = "fill_write_buffer", ++ .file = "fs/configfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000535_hash = { ++ .next = NULL, ++ .name = "flexcop_device_kmalloc", ++ .file = 
"drivers/media/dvb/b2c2/flexcop.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000536_hash = { ++ .next = NULL, ++ .name = "format_devstat_counter", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000537_hash = { ++ .next = NULL, ++ .name = "fragmentation_threshold_read", ++ .file = "net/wireless/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000538_hash = { ++ .next = NULL, ++ .name = "framebuffer_alloc", ++ .file = "include/linux/fb.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000539_hash = { ++ .next = NULL, ++ .name = "frequency_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000540_hash = { ++ .next = NULL, ++ .name = "ftdi_elan_write", ++ .file = "drivers/usb/misc/ftdi-elan.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000541_hash = { ++ .next = NULL, ++ .name = "fuse_conn_limit_read", ++ .file = "fs/fuse/control.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000542_hash = { ++ .next = NULL, ++ .name = "fuse_conn_limit_write", ++ .file = "fs/fuse/control.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000543_hash = { ++ .next = &_000388_hash, ++ .name = "fuse_conn_waiting_read", ++ .file = "fs/fuse/control.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000544_hash = { ++ .next = NULL, ++ .name = "garp_attr_create", ++ .file = "net/802/garp.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000545_hash = { ++ .next = NULL, ++ .name = "getdqbuf", ++ .file = "fs/quota/quota_tree.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000546_hash = { ++ .next = NULL, ++ .name = "get_fdb_entries", ++ .file = "net/bridge/br_ioctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000547_hash = { ++ .next = NULL, ++ .name = "get_registers", ++ .file = "drivers/net/usb/pegasus.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000548_hash = { ++ .next = NULL, ++ .name = "get_ucode_user", ++ .file = "arch/x86/kernel/microcode_intel.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000549_hash = { ++ .next = NULL, ++ .name = "gfs2_glock_nq_m", ++ .file = "fs/gfs2/glock.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000550_hash = { ++ .next = NULL, ++ .name = "gigaset_initdriver", ++ .file = "drivers/isdn/gigaset/common.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000551_hash = { ++ .next = NULL, ++ .name = "gs_alloc_req", ++ .file = "drivers/usb/gadget/u_serial.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000552_hash = { ++ .next = NULL, ++ .name = "gs_buf_alloc", ++ .file = "drivers/usb/gadget/u_serial.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000553_hash = { ++ .next = NULL, ++ .name = "gss_pipe_downcall", ++ .file = "net/sunrpc/auth_gss/auth_gss.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000554_hash = { ++ .next = NULL, ++ .name = "hcd_buffer_alloc", ++ .file = "drivers/usb/core/buffer.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000555_hash = { ++ .next = NULL, ++ .name = "hci_sock_setsockopt", ++ .file = "net/bluetooth/hci_sock.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000556_hash = { ++ .next = NULL, ++ .name = "hdpvr_read", ++ .file = "drivers/media/video/hdpvr/hdpvr-video.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000557_hash = { ++ .next = NULL, ++ .name = "hecubafb_write", ++ .file = "drivers/video/hecubafb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000558_hash = { ++ .next 
= NULL, ++ .name = "hid_parse_report", ++ .file = "include/linux/hid.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000559_hash = { ++ .next = NULL, ++ .name = "hidraw_read", ++ .file = "drivers/hid/hidraw.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000560_hash = { ++ .next = NULL, ++ .name = "hidraw_write", ++ .file = "drivers/hid/hidraw.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000561_hash = { ++ .next = NULL, ++ .name = "hid_register_field", ++ .file = "drivers/hid/hid-core.c", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000563_hash = { ++ .next = NULL, ++ .name = "hpfs_translate_name", ++ .file = "fs/hpfs/name.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000564_hash = { ++ .next = NULL, ++ .name = "hpsb_alloc_host", ++ .file = "drivers/ieee1394/hosts.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000565_hash = { ++ .next = NULL, ++ .name = "hpsb_create_hostinfo", ++ .file = "drivers/ieee1394/highlevel.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000566_hash = { ++ .next = NULL, ++ .name = "hpsb_iso_common_init", ++ .file = "drivers/ieee1394/iso.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000567_hash = { ++ .next = NULL, ++ .name = "ht40allow_map_read", ++ .file = "net/wireless/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000568_hash = { ++ .next = NULL, ++ .name = "__hwahc_dev_set_key", ++ .file = "drivers/usb/host/hwa-hc.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000569_hash = { ++ .next = NULL, ++ .name = "hysdn_conf_read", ++ .file = "drivers/isdn/hysdn/hysdn_procconf.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000570_hash = { ++ .next = NULL, ++ .name = "hysdn_conf_write", ++ .file = "drivers/isdn/hysdn/hysdn_procconf.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000571_hash = { ++ .next = NULL, ++ .name = "hysdn_log_write", ++ .file = "drivers/isdn/hysdn/hysdn_proclog.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000572_hash = { ++ .next = NULL, ++ .name = "i2400m_queue_work", ++ .file = "drivers/net/wimax/i2400m/driver.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000573_hash = { ++ .next = NULL, ++ .name = "i2400m_rx_stats_read", ++ .file = "drivers/net/wimax/i2400m/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000574_hash = { ++ .next = NULL, ++ .name = "i2400m_tx_stats_read", ++ .file = "drivers/net/wimax/i2400m/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000575_hash = { ++ .next = NULL, ++ .name = "__i2400mu_send_barker", ++ .file = "drivers/net/wimax/i2400m/usb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000576_hash = { ++ .next = NULL, ++ .name = "i2cdev_read", ++ .file = "drivers/i2c/i2c-dev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000577_hash = { ++ .next = NULL, ++ .name = "i2cdev_write", ++ .file = "drivers/i2c/i2c-dev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000578_hash = { ++ .next = NULL, ++ .name = "ib_alloc_device", ++ .file = "include/rdma/ib_verbs.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000579_hash = { ++ .next = NULL, ++ .name = "ib_copy_from_udata", ++ .file = "include/rdma/ib_verbs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000580_hash = { ++ .next = NULL, ++ .name = "ib_copy_to_udata", ++ .file = "include/rdma/ib_verbs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000581_hash = { ++ .next = NULL, ++ .name = "ibmasm_new_command", ++ .file = 
"drivers/misc/ibmasm/command.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000582_hash = { ++ .next = NULL, ++ .name = "ib_ucm_alloc_data", ++ .file = "drivers/infiniband/core/ucm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000583_hash = { ++ .next = NULL, ++ .name = "ib_umad_write", ++ .file = "drivers/infiniband/core/user_mad.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000584_hash = { ++ .next = NULL, ++ .name = "ib_uverbs_unmarshall_recv", ++ .file = "drivers/infiniband/core/uverbs_cmd.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000585_hash = { ++ .next = NULL, ++ .name = "ide_driver_proc_write", ++ .file = "drivers/ide/ide-proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000586_hash = { ++ .next = NULL, ++ .name = "ide_settings_proc_write", ++ .file = "drivers/ide/ide-proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000587_hash = { ++ .next = NULL, ++ .name = "idetape_chrdev_read", ++ .file = "drivers/ide/ide-tape.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000588_hash = { ++ .next = NULL, ++ .name = "idetape_chrdev_write", ++ .file = "drivers/ide/ide-tape.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000589_hash = { ++ .next = NULL, ++ .name = "idmap_pipe_downcall", ++ .file = "fs/nfs/idmap.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000590_hash = { ++ .next = NULL, ++ .name = "idmouse_read", ++ .file = "drivers/usb/misc/idmouse.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000591_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000592_hash = { ++ .next = NULL, ++ .name = "ieee80211_key_alloc", ++ .file = "net/mac80211/key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000593_hash = { ++ .next = NULL, ++ .name = "ikconfig_read_current", ++ .file = "kernel/configs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000594_hash = { ++ .next = NULL, ++ .name = "ilo_read", ++ .file = "drivers/misc/hpilo.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000595_hash = { ++ .next = NULL, ++ .name = "ilo_write", ++ .file = "drivers/misc/hpilo.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000596_hash = { ++ .next = NULL, ++ .name = "iowarrior_read", ++ .file = "drivers/usb/misc/iowarrior.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000597_hash = { ++ .next = NULL, ++ .name = "iowarrior_write", ++ .file = "drivers/usb/misc/iowarrior.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000598_hash = { ++ .next = NULL, ++ .name = "irda_setsockopt", ++ .file = "net/irda/af_irda.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000599_hash = { ++ .next = NULL, ++ .name = "irnet_ctrl_write", ++ .file = "net/irda/irnet/irnet_ppp.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000600_hash = { ++ .next = NULL, ++ .name = "isdn_read", ++ .file = "drivers/isdn/i4l/isdn_common.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000601_hash = { ++ .next = NULL, ++ .name = "iso_callback", ++ .file = "drivers/firewire/core-cdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000602_hash = { ++ .next = NULL, ++ .name = "iso_sched_alloc", ++ .file = "drivers/usb/host/ehci-sched.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000603_hash = { ++ .next = NULL, ++ .name = "isr_cmd_cmplt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash 
_000604_hash = { ++ .next = NULL, ++ .name = "isr_cmd_cmplt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000605_hash = { ++ .next = NULL, ++ .name = "isr_commands_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000606_hash = { ++ .next = NULL, ++ .name = "isr_commands_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000607_hash = { ++ .next = NULL, ++ .name = "isr_decrypt_done_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000608_hash = { ++ .next = NULL, ++ .name = "isr_decrypt_done_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000609_hash = { ++ .next = NULL, ++ .name = "isr_dma0_done_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000610_hash = { ++ .next = NULL, ++ .name = "isr_dma0_done_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000611_hash = { ++ .next = NULL, ++ .name = "isr_dma1_done_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000612_hash = { ++ .next = NULL, ++ .name = "isr_dma1_done_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000613_hash = { ++ .next = NULL, ++ .name = "isr_fiqs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000614_hash = { ++ .next = NULL, ++ .name = "isr_fiqs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000615_hash = { ++ .next = NULL, ++ .name = "isr_host_acknowledges_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000616_hash = { ++ .next = NULL, ++ .name = "isr_host_acknowledges_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000617_hash = { ++ .next = &_000460_hash, ++ .name = "isr_hw_pm_mode_changes_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000618_hash = { ++ .next = &_000461_hash, ++ .name = "isr_hw_pm_mode_changes_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000619_hash = { ++ .next = NULL, ++ .name = "isr_irqs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000620_hash = { ++ .next = NULL, ++ .name = "isr_irqs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000621_hash = { ++ .next = NULL, ++ .name = "isr_low_rssi_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000622_hash = { ++ .next = NULL, ++ .name = "isr_low_rssi_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000623_hash = { ++ .next = NULL, ++ .name = "isr_pci_pm_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, 
++}; ++ ++struct size_overflow_hash _000624_hash = { ++ .next = NULL, ++ .name = "isr_pci_pm_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000625_hash = { ++ .next = NULL, ++ .name = "isr_rx_headers_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000626_hash = { ++ .next = NULL, ++ .name = "isr_rx_headers_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000627_hash = { ++ .next = NULL, ++ .name = "isr_rx_mem_overflow_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000628_hash = { ++ .next = NULL, ++ .name = "isr_rx_mem_overflow_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000629_hash = { ++ .next = NULL, ++ .name = "isr_rx_procs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000630_hash = { ++ .next = NULL, ++ .name = "isr_rx_procs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000631_hash = { ++ .next = NULL, ++ .name = "isr_rx_rdys_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000632_hash = { ++ .next = NULL, ++ .name = "isr_rx_rdys_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000633_hash = { ++ .next = NULL, ++ .name = "isr_tx_exch_complete_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000634_hash = { ++ .next = NULL, ++ .name = "isr_tx_exch_complete_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000635_hash = { ++ .next = NULL, ++ .name = "isr_tx_procs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000636_hash = { ++ .next = NULL, ++ .name = "isr_tx_procs_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000637_hash = { ++ .next = NULL, ++ .name = "isr_wakeups_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000638_hash = { ++ .next = NULL, ++ .name = "isr_wakeups_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000639_hash = { ++ .next = NULL, ++ .name = "ivtv_copy_buf_to_user", ++ .file = "drivers/media/video/ivtv/ivtv-fileops.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000640_hash = { ++ .next = NULL, ++ .name = "ivtvfb_write", ++ .file = "drivers/media/video/ivtv/ivtvfb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000641_hash = { ++ .next = NULL, ++ .name = "iwl3945_sta_dbgfs_stats_table_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-3945-rs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000642_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_chain_noise_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000643_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_channels_read", ++ .file = 
"drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000644_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_current_sleep_command_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000645_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_disable_ht40_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000646_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_interrupt_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000647_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_led_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000648_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_nvm_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000649_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_qos_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000650_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_rx_queue_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000651_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_rx_statistics_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000652_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_sensitivity_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000653_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_sleep_level_override_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000654_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_sram_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000655_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_stations_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000656_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_status_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000657_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_thermal_throttling_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000658_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_traffic_log_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000659_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_tx_power_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000660_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_tx_queue_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000661_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_tx_statistics_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000662_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_ucode_general_stats_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ 
++struct size_overflow_hash _000663_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_ucode_rx_stats_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000664_hash = { ++ .next = NULL, ++ .name = "iwl_dbgfs_ucode_tx_stats_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000665_hash = { ++ .next = NULL, ++ .name = "iwm_notif_send", ++ .file = "drivers/net/wireless/iwmc3200wifi/main.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000666_hash = { ++ .next = NULL, ++ .name = "iwm_ntf_calib_res", ++ .file = "drivers/net/wireless/iwmc3200wifi/rx.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000667_hash = { ++ .next = NULL, ++ .name = "iwm_umac_set_config_var", ++ .file = "drivers/net/wireless/iwmc3200wifi/commands.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000668_hash = { ++ .next = NULL, ++ .name = "key_algorithm_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000669_hash = { ++ .next = NULL, ++ .name = "key_conf_hw_key_idx_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000670_hash = { ++ .next = NULL, ++ .name = "key_conf_keyidx_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000671_hash = { ++ .next = NULL, ++ .name = "key_conf_keylen_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000672_hash = { ++ .next = NULL, ++ .name = "key_flags_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000673_hash = { ++ .next = NULL, ++ .name = "key_icverrors_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000674_hash = { ++ .next = NULL, ++ .name = "key_ifindex_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000675_hash = { ++ .next = NULL, ++ .name = "key_key_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000676_hash = { ++ .next = NULL, ++ .name = "key_replays_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000677_hash = { ++ .next = NULL, ++ .name = "key_rx_spec_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000678_hash = { ++ .next = NULL, ++ .name = "key_tx_rx_count_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000679_hash = { ++ .next = NULL, ++ .name = "key_tx_spec_read", ++ .file = "net/mac80211/debugfs_key.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000680_hash = { ++ .next = NULL, ++ .name = "kmem_alloc", ++ .file = "fs/xfs/linux-2.6/kmem.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000681_hash = { ++ .next = NULL, ++ .name = "kvm_read_guest_atomic", ++ .file = "include/linux/kvm_host.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000682_hash = { ++ .next = NULL, ++ .name = "l2cap_sock_setsockopt", ++ .file = "net/bluetooth/l2cap.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000683_hash = { ++ .next = NULL, ++ .name = "l2cap_sock_setsockopt_old", ++ .file = "net/bluetooth/l2cap.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000684_hash = { ++ .next = NULL, ++ .name = "lane2_associate_req", ++ .file = 
"net/atm/lec.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000685_hash = { ++ .next = NULL, ++ .name = "lbs_debugfs_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000686_hash = { ++ .next = NULL, ++ .name = "lbs_debugfs_write", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000687_hash = { ++ .next = NULL, ++ .name = "lbs_dev_info", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000688_hash = { ++ .next = NULL, ++ .name = "lbs_getscantable", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000689_hash = { ++ .next = NULL, ++ .name = "lbs_rdbbp_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000690_hash = { ++ .next = NULL, ++ .name = "lbs_rdmac_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000691_hash = { ++ .next = NULL, ++ .name = "lbs_rdrf_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000692_hash = { ++ .next = NULL, ++ .name = "lbs_sleepparams_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000693_hash = { ++ .next = NULL, ++ .name = "lbs_threshold_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000694_hash = { ++ .next = NULL, ++ .name = "lcd_write", ++ .file = "drivers/usb/misc/usblcd.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000695_hash = { ++ .next = NULL, ++ .name = "leaf_dealloc", ++ .file = "fs/gfs2/dir.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000696_hash = { ++ .next = NULL, ++ .name = "__lgread", ++ .file = "drivers/lguest/core.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000697_hash = { ++ .next = NULL, ++ .name = "__lgwrite", ++ .file = "drivers/lguest/core.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000698_hash = { ++ .next = NULL, ++ .name = "LoadBitmap", ++ .file = "drivers/media/dvb/ttpci/av7110_hw.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000699_hash = { ++ .next = NULL, ++ .name = "long_retry_limit_read", ++ .file = "net/wireless/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000700_hash = { ++ .next = NULL, ++ .name = "lpfc_debugfs_read", ++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000701_hash = { ++ .next = NULL, ++ .name = "lpfc_sli4_queue_alloc", ++ .file = "drivers/scsi/lpfc/lpfc_sli.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000702_hash = { ++ .next = NULL, ++ .name = "lp_write", ++ .file = "drivers/char/lp.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000703_hash = { ++ .next = NULL, ++ .name = "mce_write", ++ .file = "arch/x86/kernel/cpu/mcheck/mce-inject.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000704_hash = { ++ .next = NULL, ++ .name = "mcs7830_get_reg", ++ .file = "drivers/net/usb/mcs7830.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000705_hash = { ++ .next = NULL, ++ .name = "mcs7830_set_reg", ++ .file = "drivers/net/usb/mcs7830.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000706_hash = { ++ .next = NULL, ++ .name = "mdc800_device_read", ++ .file = 
"drivers/usb/image/mdc800.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000707_hash = { ++ .next = NULL, ++ .name = "memstick_alloc_host", ++ .file = "include/linux/memstick.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000708_hash = { ++ .next = NULL, ++ .name = "metronomefb_write", ++ .file = "drivers/video/metronomefb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000709_hash = { ++ .next = NULL, ++ .name = "mic_calc_failure_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000710_hash = { ++ .next = NULL, ++ .name = "mic_calc_failure_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000711_hash = { ++ .next = &_000189_hash, ++ .name = "mic_rx_pkts_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000712_hash = { ++ .next = &_000489_hash, ++ .name = "mic_rx_pkts_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000713_hash = { ++ .next = NULL, ++ .name = "mlx4_en_create_rx_ring", ++ .file = "drivers/net/mlx4/en_rx.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000714_hash = { ++ .next = NULL, ++ .name = "mlx4_en_create_tx_ring", ++ .file = "drivers/net/mlx4/en_tx.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000715_hash = { ++ .next = NULL, ++ .name = "mmc_ext_csd_read", ++ .file = "drivers/mmc/core/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000716_hash = { ++ .next = NULL, ++ .name = "mmc_send_cxd_data", ++ .file = "drivers/mmc/core/mmc_ops.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000717_hash = { ++ .next = NULL, ++ .name = "mon_bin_get_event", ++ .file = "drivers/usb/mon/mon_bin.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000718_hash = { ++ .next = NULL, ++ .name = "mon_stat_read", ++ .file = "drivers/usb/mon/mon_stat.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000719_hash = { ++ .next = NULL, ++ .name = "mousedev_read", ++ .file = "drivers/input/mousedev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000720_hash = { ++ .next = NULL, ++ .name = "mptctl_getiocinfo", ++ .file = "drivers/message/fusion/mptctl.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000721_hash = { ++ .next = NULL, ++ .name = "msnd_fifo_alloc", ++ .file = "sound/oss/msnd.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000722_hash = { ++ .next = NULL, ++ .name = "mtd_do_readoob", ++ .file = "drivers/mtd/mtdchar.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000723_hash = { ++ .next = NULL, ++ .name = "mtd_do_writeoob", ++ .file = "drivers/mtd/mtdchar.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000724_hash = { ++ .next = NULL, ++ .name = "mtd_read", ++ .file = "drivers/mtd/mtdchar.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000725_hash = { ++ .next = NULL, ++ .name = "mtd_write", ++ .file = "drivers/mtd/mtdchar.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000726_hash = { ++ .next = NULL, ++ .name = "ncp_file_write", ++ .file = "fs/ncpfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000727_hash = { ++ .next = NULL, ++ .name = "ncp__vol2io", ++ .file = "fs/ncpfs/ncplib_kernel.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000728_hash = { ++ .next = NULL, ++ .name = "nfs4_write_cached_acl", ++ .file = "fs/nfs/nfs4proc.c", ++ .param3 
= 1, ++}; ++ ++struct size_overflow_hash _000729_hash = { ++ .next = NULL, ++ .name = "nfsctl_transaction_read", ++ .file = "fs/nfsd/nfsctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000730_hash = { ++ .next = NULL, ++ .name = "nfsctl_transaction_write", ++ .file = "fs/nfsd/nfsctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000731_hash = { ++ .next = NULL, ++ .name = "nfsd_cache_update", ++ .file = "include/linux/nfsd/cache.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000732_hash = { ++ .next = NULL, ++ .name = "nfs_readdata_alloc", ++ .file = "include/linux/nfs_fs.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000733_hash = { ++ .next = NULL, ++ .name = "nfs_writedata_alloc", ++ .file = "include/linux/nfs_fs.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000734_hash = { ++ .next = NULL, ++ .name = "noack_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000735_hash = { ++ .next = NULL, ++ .name = "nsm_create_handle", ++ .file = "fs/lockd/mon.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000736_hash = { ++ .next = NULL, ++ .name = "ntfs_copy_from_user", ++ .file = "fs/ntfs/file.c", ++ .param3 = 1, ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000738_hash = { ++ .next = NULL, ++ .name = "__ntfs_copy_from_user_iovec_inatomic", ++ .file = "fs/ntfs/file.c", ++ .param3 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000740_hash = { ++ .next = NULL, ++ .name = "__ntfs_malloc", ++ .file = "fs/ntfs/malloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000741_hash = { ++ .next = NULL, ++ .name = "nvram_write", ++ .file = "drivers/char/nvram.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000742_hash = { ++ .next = NULL, ++ .name = "o2hb_debug_read", ++ .file = "fs/ocfs2/cluster/heartbeat.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000743_hash = { ++ .next = NULL, ++ .name = "o2net_send_message_vec", ++ .file = "fs/ocfs2/cluster/tcp.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000744_hash = { ++ .next = NULL, ++ .name = "ocfs2_control_cfu", ++ .file = "fs/ocfs2/stack_user.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000745_hash = { ++ .next = NULL, ++ .name = "ocfs2_control_read", ++ .file = "fs/ocfs2/stack_user.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000746_hash = { ++ .next = NULL, ++ .name = "ocfs2_debug_read", ++ .file = "fs/ocfs2/super.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000747_hash = { ++ .next = NULL, ++ .name = "oprofilefs_str_to_user", ++ .file = "include/linux/oprofile.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000748_hash = { ++ .next = NULL, ++ .name = "oprofilefs_ulong_from_user", ++ .file = "include/linux/oprofile.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000749_hash = { ++ .next = NULL, ++ .name = "oprofilefs_ulong_to_user", ++ .file = "include/linux/oprofile.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000750_hash = { ++ .next = NULL, ++ .name = "_osd_realloc_seg", ++ .file = "drivers/scsi/osd/osd_initiator.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000751_hash = { ++ .next = NULL, ++ .name = "oti6858_buf_alloc", ++ .file = "drivers/usb/serial/oti6858.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000752_hash = { ++ .next = NULL, ++ .name = "otp_read", ++ .file = "drivers/mtd/devices/mtd_dataflash.c", ++ .param2 = 1, ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000754_hash = { ++ .next = 
NULL, ++ .name = "packet_setsockopt", ++ .file = "net/packet/af_packet.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000755_hash = { ++ .next = NULL, ++ .name = "parse_arg", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000756_hash = { ++ .next = NULL, ++ .name = "parse_command", ++ .file = "fs/binfmt_misc.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000757_hash = { ++ .next = NULL, ++ .name = "pcmcia_replace_cis", ++ .file = "drivers/pcmcia/cistpl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000758_hash = { ++ .next = NULL, ++ .name = "pcnet32_realloc_rx_ring", ++ .file = "drivers/net/pcnet32.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000759_hash = { ++ .next = NULL, ++ .name = "pcnet32_realloc_tx_ring", ++ .file = "drivers/net/pcnet32.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000760_hash = { ++ .next = NULL, ++ .name = "pdu_write_u", ++ .file = "net/9p/protocol.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000761_hash = { ++ .next = NULL, ++ .name = "pgctrl_write", ++ .file = "net/core/pktgen.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000762_hash = { ++ .next = NULL, ++ .name = "pg_read", ++ .file = "drivers/block/paride/pg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000763_hash = { ++ .next = NULL, ++ .name = "pg_write", ++ .file = "drivers/block/paride/pg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000764_hash = { ++ .next = NULL, ++ .name = "pkt_add", ++ .file = "drivers/usb/serial/garmin_gps.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000765_hash = { ++ .next = NULL, ++ .name = "pktgen_if_write", ++ .file = "net/core/pktgen.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000766_hash = { ++ .next = NULL, ++ .name = "pl2303_buf_alloc", ++ .file = "drivers/usb/serial/pl2303.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000767_hash = { ++ .next = NULL, ++ .name = "ppp_cp_parse_cr", ++ .file = "drivers/net/wan/hdlc_ppp.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000768_hash = { ++ .next = NULL, ++ .name = "ppp_write", ++ .file = "drivers/net/ppp_generic.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000769_hash = { ++ .next = NULL, ++ .name = "pp_read", ++ .file = "drivers/char/ppdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000770_hash = { ++ .next = NULL, ++ .name = "pp_write", ++ .file = "drivers/char/ppdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000771_hash = { ++ .next = NULL, ++ .name = "printer_read", ++ .file = "drivers/usb/gadget/printer.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000772_hash = { ++ .next = NULL, ++ .name = "printer_req_alloc", ++ .file = "drivers/usb/gadget/printer.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000773_hash = { ++ .next = NULL, ++ .name = "printer_write", ++ .file = "drivers/usb/gadget/printer.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000774_hash = { ++ .next = NULL, ++ .name = "prism2_set_genericelement", ++ .file = "drivers/net/wireless/hostap/hostap_ioctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000775_hash = { ++ .next = NULL, ++ .name = "proc_read", ++ .file = "drivers/net/wireless/airo.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000776_hash = { ++ .next = NULL, ++ .name = "proc_scsi_devinfo_write", ++ .file = "drivers/scsi/scsi_devinfo.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000777_hash = { ++ .next = 
NULL, ++ .name = "proc_scsi_write", ++ .file = "drivers/scsi/scsi_proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000778_hash = { ++ .next = NULL, ++ .name = "proc_scsi_write_proc", ++ .file = "drivers/scsi/scsi_proc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000779_hash = { ++ .next = NULL, ++ .name = "proc_write", ++ .file = "drivers/net/wireless/airo.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000780_hash = { ++ .next = NULL, ++ .name = "ps_pspoll_max_apturn_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000781_hash = { ++ .next = NULL, ++ .name = "ps_pspoll_max_apturn_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000782_hash = { ++ .next = NULL, ++ .name = "ps_pspoll_timeouts_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000783_hash = { ++ .next = NULL, ++ .name = "ps_pspoll_timeouts_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000784_hash = { ++ .next = NULL, ++ .name = "ps_pspoll_utilization_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000785_hash = { ++ .next = NULL, ++ .name = "ps_pspoll_utilization_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000786_hash = { ++ .next = NULL, ++ .name = "ps_upsd_max_apturn_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000787_hash = { ++ .next = NULL, ++ .name = "ps_upsd_max_apturn_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000788_hash = { ++ .next = NULL, ++ .name = "ps_upsd_max_sptime_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000789_hash = { ++ .next = NULL, ++ .name = "ps_upsd_max_sptime_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000790_hash = { ++ .next = NULL, ++ .name = "ps_upsd_timeouts_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000791_hash = { ++ .next = NULL, ++ .name = "ps_upsd_timeouts_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000792_hash = { ++ .next = NULL, ++ .name = "ps_upsd_utilization_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000793_hash = { ++ .next = &_000411_hash, ++ .name = "ps_upsd_utilization_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000794_hash = { ++ .next = NULL, ++ .name = "pt_read", ++ .file = "drivers/block/paride/pt.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000795_hash = { ++ .next = NULL, ++ .name = "pt_write", ++ .file = "drivers/block/paride/pt.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000796_hash = { ++ .next = NULL, ++ .name = "pvr2_ioread_read", ++ .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000797_hash = { ++ .next = NULL, ++ .name = 
"pvr2_ioread_set_sync_key", ++ .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000798_hash = { ++ .next = NULL, ++ .name = "pvr2_stream_buffer_count", ++ .file = "drivers/media/video/pvrusb2/pvrusb2-io.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000799_hash = { ++ .next = NULL, ++ .name = "pwc_rvmalloc", ++ .file = "drivers/media/video/pwc/pwc-if.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000800_hash = { ++ .next = NULL, ++ .name = "pwc_video_read", ++ .file = "drivers/media/video/pwc/pwc-if.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000801_hash = { ++ .next = NULL, ++ .name = "pwr_disable_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000802_hash = { ++ .next = NULL, ++ .name = "pwr_disable_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000803_hash = { ++ .next = NULL, ++ .name = "pwr_elp_enter_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000804_hash = { ++ .next = NULL, ++ .name = "pwr_elp_enter_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000805_hash = { ++ .next = NULL, ++ .name = "pwr_enable_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000806_hash = { ++ .next = NULL, ++ .name = "pwr_enable_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000807_hash = { ++ .next = NULL, ++ .name = "pwr_fix_tsf_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000808_hash = { ++ .next = NULL, ++ .name = "pwr_fix_tsf_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000809_hash = { ++ .next = NULL, ++ .name = "pwr_missing_bcns_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000810_hash = { ++ .next = NULL, ++ .name = "pwr_missing_bcns_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000811_hash = { ++ .next = NULL, ++ .name = "pwr_power_save_off_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000812_hash = { ++ .next = NULL, ++ .name = "pwr_power_save_off_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000813_hash = { ++ .next = NULL, ++ .name = "pwr_ps_enter_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000814_hash = { ++ .next = NULL, ++ .name = "pwr_ps_enter_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000815_hash = { ++ .next = NULL, ++ .name = "pwr_rcvd_awake_beacons_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000816_hash = { ++ .next = NULL, ++ .name = "pwr_rcvd_awake_beacons_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000817_hash = { ++ .next = 
NULL, ++ .name = "pwr_rcvd_beacons_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000818_hash = { ++ .next = NULL, ++ .name = "pwr_rcvd_beacons_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000819_hash = { ++ .next = NULL, ++ .name = "pwr_tx_without_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000820_hash = { ++ .next = NULL, ++ .name = "pwr_tx_without_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000821_hash = { ++ .next = NULL, ++ .name = "pwr_tx_with_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000822_hash = { ++ .next = NULL, ++ .name = "pwr_tx_with_ps_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000823_hash = { ++ .next = NULL, ++ .name = "pwr_wake_on_host_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000824_hash = { ++ .next = NULL, ++ .name = "pwr_wake_on_host_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000825_hash = { ++ .next = NULL, ++ .name = "pwr_wake_on_timer_exp_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000826_hash = { ++ .next = NULL, ++ .name = "pwr_wake_on_timer_exp_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000827_hash = { ++ .next = NULL, ++ .name = "qc_capture", ++ .file = "drivers/media/video/c-qcam.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000828_hash = { ++ .next = NULL, ++ .name = "qla2x00_get_ctx_sp", ++ .file = "drivers/scsi/qla2xxx/qla_init.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000829_hash = { ++ .next = NULL, ++ .name = "queues_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000830_hash = { ++ .next = NULL, ++ .name = "r3964_write", ++ .file = "drivers/char/n_r3964.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000831_hash = { ++ .next = NULL, ++ .name = "raw_setsockopt", ++ .file = "net/can/raw.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000832_hash = { ++ .next = NULL, ++ .name = "rcname_read", ++ .file = "net/mac80211/rate.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000833_hash = { ++ .next = NULL, ++ .name = "rds_message_alloc", ++ .file = "net/rds/message.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000834_hash = { ++ .next = NULL, ++ .name = "rds_page_copy_user", ++ .file = "net/rds/page.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000835_hash = { ++ .next = NULL, ++ .name = "read", ++ .file = "drivers/pci/hotplug/cpqphp_sysfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000836_hash = { ++ .next = NULL, ++ .name = "read_buf", ++ .file = "fs/nfsd/nfs4xdr.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000837_hash = { ++ .next = NULL, ++ .name = "read_cis_cache", ++ .file = "drivers/pcmcia/cistpl.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000838_hash = { ++ .next = NULL, ++ .name = "read_file_beacon", ++ .file = 
"drivers/net/wireless/ath/ath5k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000839_hash = { ++ .next = NULL, ++ .name = "read_file_debug", ++ .file = "drivers/net/wireless/ath/ath9k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000840_hash = { ++ .next = &_000557_hash, ++ .name = "read_file_debug", ++ .file = "drivers/net/wireless/ath/ath5k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000841_hash = { ++ .next = NULL, ++ .name = "read_file_dma", ++ .file = "drivers/net/wireless/ath/ath9k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000842_hash = { ++ .next = NULL, ++ .name = "read_file_interrupt", ++ .file = "drivers/net/wireless/ath/ath9k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000843_hash = { ++ .next = NULL, ++ .name = "read_file_rcstat", ++ .file = "drivers/net/wireless/ath/ath9k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000844_hash = { ++ .next = NULL, ++ .name = "read_file_wiphy", ++ .file = "drivers/net/wireless/ath/ath9k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000845_hash = { ++ .next = NULL, ++ .name = "read_file_xmit", ++ .file = "drivers/net/wireless/ath/ath9k/debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000846_hash = { ++ .next = NULL, ++ .name = "read_flush", ++ .file = "net/sunrpc/cache.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000847_hash = { ++ .next = NULL, ++ .name = "realloc_buffer", ++ .file = "drivers/scsi/device_handler/scsi_dh_alua.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000848_hash = { ++ .next = NULL, ++ .name = "recent_mt_proc_write", ++ .file = "net/netfilter/xt_recent.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000849_hash = { ++ .next = NULL, ++ .name = "recent_old_proc_write", ++ .file = "net/netfilter/xt_recent.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000850_hash = { ++ .next = NULL, ++ .name = "recv_msg", ++ .file = "net/tipc/socket.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000851_hash = { ++ .next = NULL, ++ .name = "recv_stream", ++ .file = "net/tipc/socket.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000852_hash = { ++ .next = NULL, ++ .name = "reg_w_buf", ++ .file = "drivers/media/video/gspca/t613.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000853_hash = { ++ .next = NULL, ++ .name = "reg_w_ixbuf", ++ .file = "drivers/media/video/gspca/t613.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000854_hash = { ++ .next = NULL, ++ .name = "reiserfs_allocate_list_bitmaps", ++ .file = "include/linux/reiserfs_fs.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000855_hash = { ++ .next = NULL, ++ .name = "reiserfs_resize", ++ .file = "include/linux/reiserfs_fs_sb.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000856_hash = { ++ .next = NULL, ++ .name = "remote_settings_file_write", ++ .file = "drivers/misc/ibmasm/ibmasmfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000857_hash = { ++ .next = NULL, ++ .name = "retry_count_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000858_hash = { ++ .next = NULL, ++ .name = "retry_count_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000859_hash = { ++ .next = NULL, ++ .name = "revalidate", ++ .file = "drivers/block/aoe/aoechr.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000860_hash = { 
++ .next = NULL, ++ .name = "rfcomm_sock_setsockopt", ++ .file = "net/bluetooth/rfcomm/sock.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000861_hash = { ++ .next = NULL, ++ .name = "rfkill_fop_read", ++ .file = "net/rfkill/core.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000862_hash = { ++ .next = NULL, ++ .name = "rndis_add_response", ++ .file = "drivers/usb/gadget/rndis.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000863_hash = { ++ .next = NULL, ++ .name = "rpc_malloc", ++ .file = "include/linux/sunrpc/sched.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000864_hash = { ++ .next = NULL, ++ .name = "rs_sta_dbgfs_rate_scale_data_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000865_hash = { ++ .next = NULL, ++ .name = "rs_sta_dbgfs_scale_table_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000866_hash = { ++ .next = NULL, ++ .name = "rs_sta_dbgfs_stats_table_read", ++ .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000867_hash = { ++ .next = NULL, ++ .name = "rt2x00debug_write_bbp", ++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000868_hash = { ++ .next = NULL, ++ .name = "rt2x00debug_write_csr", ++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000869_hash = { ++ .next = &_000596_hash, ++ .name = "rt2x00debug_write_eeprom", ++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000870_hash = { ++ .next = NULL, ++ .name = "rt2x00debug_write_rf", ++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000871_hash = { ++ .next = NULL, ++ .name = "rts_threshold_read", ++ .file = "net/wireless/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000872_hash = { ++ .next = NULL, ++ .name = "rvmalloc", ++ .file = "drivers/media/video/gspca/gspca.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000873_hash = { ++ .next = NULL, ++ .name = "rx_dropped_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000874_hash = { ++ .next = NULL, ++ .name = "rx_dropped_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000875_hash = { ++ .next = NULL, ++ .name = "rx_fcs_err_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000876_hash = { ++ .next = NULL, ++ .name = "rx_fcs_err_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000877_hash = { ++ .next = NULL, ++ .name = "rx_hdr_overflow_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000878_hash = { ++ .next = NULL, ++ .name = "rx_hdr_overflow_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000879_hash = { ++ .next = NULL, ++ .name = "rx_hw_stuck_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000880_hash = { ++ .next = NULL, ++ .name = "rx_hw_stuck_read", ++ .file = 
"drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000881_hash = { ++ .next = NULL, ++ .name = "rx_out_of_mem_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000882_hash = { ++ .next = NULL, ++ .name = "rx_out_of_mem_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000883_hash = { ++ .next = NULL, ++ .name = "rx_path_reset_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000884_hash = { ++ .next = &_000138_hash, ++ .name = "rx_path_reset_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000885_hash = { ++ .next = NULL, ++ .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000886_hash = { ++ .next = NULL, ++ .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000887_hash = { ++ .next = NULL, ++ .name = "rxpipe_descr_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000888_hash = { ++ .next = NULL, ++ .name = "rxpipe_descr_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000889_hash = { ++ .next = NULL, ++ .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000890_hash = { ++ .next = NULL, ++ .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000891_hash = { ++ .next = NULL, ++ .name = "rxpipe_rx_prep_beacon_drop_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000892_hash = { ++ .next = NULL, ++ .name = "rxpipe_rx_prep_beacon_drop_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000893_hash = { ++ .next = NULL, ++ .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000894_hash = { ++ .next = NULL, ++ .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000895_hash = { ++ .next = NULL, ++ .name = "rx_reset_counter_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000896_hash = { ++ .next = NULL, ++ .name = "rx_reset_counter_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000897_hash = { ++ .next = NULL, ++ .name = "rx_xfr_hint_trig_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000898_hash = { ++ .next = NULL, ++ .name = "rx_xfr_hint_trig_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct 
size_overflow_hash _000899_hash = { ++ .next = NULL, ++ .name = "saa_write", ++ .file = "drivers/media/video/stradis.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000900_hash = { ++ .next = NULL, ++ .name = "scsi_tgt_copy_sense", ++ .file = "drivers/scsi/scsi_tgt_lib.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000901_hash = { ++ .next = NULL, ++ .name = "sctp_auth_create_key", ++ .file = "net/sctp/auth.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _000902_hash = { ++ .next = NULL, ++ .name = "sctp_make_abort_user", ++ .file = "include/net/sctp/sm.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000903_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_active_key", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000904_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_adaptation_layer", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000905_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_associnfo", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000906_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_auth_chunk", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000907_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_auth_key", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000908_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_autoclose", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000909_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_context", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000910_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_default_send_param", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000911_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_delayed_ack", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000912_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_del_key", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000913_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_events", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000914_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_hmac_ident", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000915_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_initmsg", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000916_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_maxburst", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000917_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_maxseg", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000918_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_peer_addr_params", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000919_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_peer_primary_addr", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000920_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt_rtoinfo", ++ .file = "net/sctp/socket.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000921_hash = { ++ .next = NULL, ++ .name = 
"sctp_tsnmap_init", ++ .file = "include/net/sctp/tsnmap.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000922_hash = { ++ .next = NULL, ++ .name = "se401_read", ++ .file = "drivers/media/video/se401.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000923_hash = { ++ .next = NULL, ++ .name = "send_control_msg", ++ .file = "drivers/media/video/zr364xx.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000924_hash = { ++ .next = NULL, ++ .name = "set_aoe_iflist", ++ .file = "drivers/block/aoe/aoenet.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000925_hash = { ++ .next = NULL, ++ .name = "set_registers", ++ .file = "drivers/net/usb/pegasus.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000926_hash = { ++ .next = NULL, ++ .name = "setup_req", ++ .file = "drivers/usb/gadget/inode.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000927_hash = { ++ .next = NULL, ++ .name = "sgl_map_user_pages", ++ .file = "drivers/scsi/st.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000928_hash = { ++ .next = NULL, ++ .name = "sg_proc_write_adio", ++ .file = "drivers/scsi/sg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000929_hash = { ++ .next = NULL, ++ .name = "sg_proc_write_dressz", ++ .file = "drivers/scsi/sg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000930_hash = { ++ .next = NULL, ++ .name = "short_retry_limit_read", ++ .file = "net/wireless/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000931_hash = { ++ .next = NULL, ++ .name = "sm501_create_subdev", ++ .file = "drivers/mfd/sm501.c", ++ .param3 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000933_hash = { ++ .next = NULL, ++ .name = "sn9c102_read", ++ .file = "drivers/media/video/sn9c102/sn9c102_core.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000934_hash = { ++ .next = NULL, ++ .name = "snd_ac97_pcm_assign", ++ .file = "include/sound/ac97_codec.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000935_hash = { ++ .next = NULL, ++ .name = "snd_ctl_elem_user_tlv", ++ .file = "sound/core/control.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000936_hash = { ++ .next = NULL, ++ .name = "snd_emu10k1_fx8010_read", ++ .file = "sound/pci/emu10k1/emuproc.c", ++ .param5 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000938_hash = { ++ .next = NULL, ++ .name = "snd_es1938_capture_copy", ++ .file = "sound/pci/es1938.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000939_hash = { ++ .next = NULL, ++ .name = "snd_gus_dram_peek", ++ .file = "sound/isa/gus/gus_dram.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000940_hash = { ++ .next = NULL, ++ .name = "snd_gus_dram_poke", ++ .file = "sound/isa/gus/gus_dram.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _000941_hash = { ++ .next = NULL, ++ .name = "snd_hdsp_capture_copy", ++ .file = "sound/pci/rme9652/hdsp.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000942_hash = { ++ .next = NULL, ++ .name = "snd_hdspm_capture_copy", ++ .file = "sound/pci/rme9652/hdspm.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000943_hash = { ++ .next = NULL, ++ .name = "snd_hdspm_playback_copy", ++ .file = "sound/pci/rme9652/hdspm.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000944_hash = { ++ .next = NULL, ++ .name = "snd_hdsp_playback_copy", ++ .file = "sound/pci/rme9652/hdsp.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000945_hash = { ++ .next = NULL, ++ .name = "snd_info_entry_write", ++ .file = 
"sound/core/info.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000946_hash = { ++ .next = NULL, ++ .name = "snd_mem_proc_write", ++ .file = "sound/core/memalloc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000947_hash = { ++ .next = NULL, ++ .name = "snd_opl4_mem_proc_read", ++ .file = "sound/drivers/opl4/opl4_proc.c", ++ .param5 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000949_hash = { ++ .next = NULL, ++ .name = "snd_opl4_mem_proc_write", ++ .file = "sound/drivers/opl4/opl4_proc.c", ++ .param5 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _000951_hash = { ++ .next = NULL, ++ .name = "snd_pcm_aio_read", ++ .file = "sound/core/pcm_native.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000952_hash = { ++ .next = NULL, ++ .name = "snd_pcm_aio_write", ++ .file = "sound/core/pcm_native.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000953_hash = { ++ .next = NULL, ++ .name = "snd_pcm_alloc_vmalloc_buffer", ++ .file = "sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000954_hash = { ++ .next = NULL, ++ .name = "snd_pcm_alloc_vmalloc_buffer", ++ .file = "sound/drivers/vx/vx_pcm.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000955_hash = { ++ .next = NULL, ++ .name = "snd_pcm_alloc_vmalloc_buffer", ++ .file = "drivers/media/video/cx231xx/cx231xx-audio.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000956_hash = { ++ .next = NULL, ++ .name = "snd_pcm_alloc_vmalloc_buffer", ++ .file = "sound/usb/usbaudio.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000957_hash = { ++ .next = NULL, ++ .name = "snd_pcm_alloc_vmalloc_buffer", ++ .file = "drivers/media/video/em28xx/em28xx-audio.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000958_hash = { ++ .next = NULL, ++ .name = "snd_pcm_oss_read1", ++ .file = "sound/core/oss/pcm_oss.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000959_hash = { ++ .next = NULL, ++ .name = "snd_pcm_oss_write1", ++ .file = "sound/core/oss/pcm_oss.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000960_hash = { ++ .next = NULL, ++ .name = "snd_pcm_oss_write2", ++ .file = "sound/core/oss/pcm_oss.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000961_hash = { ++ .next = NULL, ++ .name = "snd_pcm_plugin_build", ++ .file = "sound/core/oss/pcm_plugin.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000962_hash = { ++ .next = NULL, ++ .name = "snd_rme9652_capture_copy", ++ .file = "sound/pci/rme9652/rme9652.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000963_hash = { ++ .next = NULL, ++ .name = "snd_rme9652_playback_copy", ++ .file = "sound/pci/rme9652/rme9652.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _000964_hash = { ++ .next = &_000109_hash, ++ .name = "snd_usb_ctl_msg", ++ .file = "sound/usb/usbaudio.c", ++ .param8 = 1, ++}; ++ ++struct size_overflow_hash _000965_hash = { ++ .next = NULL, ++ .name = "spidev_message", ++ .file = "drivers/spi/spidev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000966_hash = { ++ .next = NULL, ++ .name = "spidev_write", ++ .file = "drivers/spi/spidev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000967_hash = { ++ .next = NULL, ++ .name = "srp_alloc_iu", ++ .file = "drivers/infiniband/ulp/srp/ib_srp.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000968_hash = { ++ .next = NULL, ++ .name = "srp_iu_pool_alloc", ++ .file = "drivers/scsi/libsrp.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000969_hash 
= { ++ .next = NULL, ++ .name = "srp_ring_alloc", ++ .file = "drivers/scsi/libsrp.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _000970_hash = { ++ .next = NULL, ++ .name = "sta_agg_status_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000971_hash = { ++ .next = NULL, ++ .name = "sta_dev_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000972_hash = { ++ .next = NULL, ++ .name = "sta_flags_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000973_hash = { ++ .next = NULL, ++ .name = "sta_inactive_ms_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000974_hash = { ++ .next = NULL, ++ .name = "sta_last_noise_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000975_hash = { ++ .next = NULL, ++ .name = "sta_last_qual_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000976_hash = { ++ .next = NULL, ++ .name = "sta_last_seq_ctrl_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000977_hash = { ++ .next = NULL, ++ .name = "sta_last_signal_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000978_hash = { ++ .next = NULL, ++ .name = "sta_num_ps_buf_frames_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000979_hash = { ++ .next = NULL, ++ .name = "sta_rx_bytes_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000980_hash = { ++ .next = NULL, ++ .name = "sta_rx_dropped_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000981_hash = { ++ .next = NULL, ++ .name = "sta_rx_duplicates_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000982_hash = { ++ .next = NULL, ++ .name = "sta_rx_fragments_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000983_hash = { ++ .next = NULL, ++ .name = "sta_rx_packets_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000984_hash = { ++ .next = NULL, ++ .name = "stats_failed_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000985_hash = { ++ .next = NULL, ++ .name = "stats_frame_duplicate_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000986_hash = { ++ .next = NULL, ++ .name = "stats_multicast_received_frame_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000987_hash = { ++ .next = NULL, ++ .name = "stats_multicast_transmitted_frame_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000988_hash = { ++ .next = NULL, ++ .name = "stats_multiple_retry_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000989_hash = { ++ .next = NULL, ++ .name = "stats_received_fragment_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000990_hash = { ++ .next = NULL, ++ .name = "stats_retry_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, 
++}; ++ ++struct size_overflow_hash _000991_hash = { ++ .next = NULL, ++ .name = "stats_rx_expand_skb_head2_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000992_hash = { ++ .next = NULL, ++ .name = "stats_rx_expand_skb_head_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000993_hash = { ++ .next = NULL, ++ .name = "stats_rx_handlers_drop_defrag_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000994_hash = { ++ .next = NULL, ++ .name = "stats_rx_handlers_drop_nullfunc_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000995_hash = { ++ .next = NULL, ++ .name = "stats_rx_handlers_drop_passive_scan_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000996_hash = { ++ .next = NULL, ++ .name = "stats_rx_handlers_drop_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000997_hash = { ++ .next = NULL, ++ .name = "stats_rx_handlers_drop_short_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000998_hash = { ++ .next = NULL, ++ .name = "stats_rx_handlers_fragments_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _000999_hash = { ++ .next = NULL, ++ .name = "stats_rx_handlers_queued_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001000_hash = { ++ .next = NULL, ++ .name = "stats_transmitted_fragment_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001001_hash = { ++ .next = NULL, ++ .name = "stats_transmitted_frame_count_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001002_hash = { ++ .next = NULL, ++ .name = "stats_tx_expand_skb_head_cloned_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001003_hash = { ++ .next = NULL, ++ .name = "stats_tx_expand_skb_head_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001004_hash = { ++ .next = NULL, ++ .name = "stats_tx_handlers_drop_fragment_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001005_hash = { ++ .next = NULL, ++ .name = "stats_tx_handlers_drop_not_assoc_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001006_hash = { ++ .next = NULL, ++ .name = "stats_tx_handlers_drop_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001007_hash = { ++ .next = NULL, ++ .name = "stats_tx_handlers_drop_unauth_port_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001008_hash = { ++ .next = NULL, ++ .name = "stats_tx_handlers_drop_unencrypted_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001009_hash = { ++ .next = NULL, ++ .name = "stats_tx_handlers_drop_wep_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001010_hash = { ++ .next = NULL, ++ .name = "stats_tx_handlers_queued_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001011_hash = { ++ .next = NULL, ++ .name = "stats_tx_status_drop_read", ++ .file = 
"net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001012_hash = { ++ .next = NULL, ++ .name = "sta_tx_bytes_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001013_hash = { ++ .next = NULL, ++ .name = "sta_tx_filtered_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001014_hash = { ++ .next = NULL, ++ .name = "sta_tx_fragments_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001015_hash = { ++ .next = NULL, ++ .name = "sta_tx_packets_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001016_hash = { ++ .next = NULL, ++ .name = "sta_tx_retry_count_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001017_hash = { ++ .next = NULL, ++ .name = "sta_tx_retry_failed_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001018_hash = { ++ .next = NULL, ++ .name = "sta_wep_weak_iv_count_read", ++ .file = "net/mac80211/debugfs_sta.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001019_hash = { ++ .next = NULL, ++ .name = "stk_prepare_sio_buffers", ++ .file = "drivers/media/video/stk-webcam.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001020_hash = { ++ .next = NULL, ++ .name = "str_to_user", ++ .file = "drivers/input/evdev.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001021_hash = { ++ .next = NULL, ++ .name = "stv680_read", ++ .file = "drivers/media/video/stv680.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001022_hash = { ++ .next = NULL, ++ .name = "svc_pool_map_alloc_arrays", ++ .file = "net/sunrpc/svc.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001023_hash = { ++ .next = NULL, ++ .name = "svc_setsockopt", ++ .file = "net/atm/svc.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001024_hash = { ++ .next = NULL, ++ .name = "tda10048_writeregbulk", ++ .file = "drivers/media/dvb/frontends/tda10048.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001025_hash = { ++ .next = NULL, ++ .name = "tifm_alloc_adapter", ++ .file = "include/linux/tifm.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001026_hash = { ++ .next = NULL, ++ .name = "tipc_subseq_alloc", ++ .file = "net/tipc/name_table.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001027_hash = { ++ .next = NULL, ++ .name = "total_ps_buffered_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001028_hash = { ++ .next = NULL, ++ .name = "tower_write", ++ .file = "drivers/usb/misc/legousbtower.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001029_hash = { ++ .next = NULL, ++ .name = "tsf_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001030_hash = { ++ .next = NULL, ++ .name = "ttm_bo_fbdev_io", ++ .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001031_hash = { ++ .next = NULL, ++ .name = "ttm_bo_io", ++ .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001032_hash = { ++ .next = NULL, ++ .name = "tx_internal_desc_overflow_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001033_hash = { ++ .next = NULL, ++ .name = "tx_internal_desc_overflow_read", ++ .file = 
"drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001034_hash = { ++ .next = &_000323_hash, ++ .name = "tx_queue_len_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001035_hash = { ++ .next = &_000324_hash, ++ .name = "tx_queue_len_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001036_hash = { ++ .next = NULL, ++ .name = "udf_alloc_i_data", ++ .file = "fs/udf/inode.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001037_hash = { ++ .next = NULL, ++ .name = "udf_sb_alloc_partition_maps", ++ .file = "fs/udf/super.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001038_hash = { ++ .next = NULL, ++ .name = "uea_idma_write", ++ .file = "drivers/usb/atm/ueagle-atm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001039_hash = { ++ .next = NULL, ++ .name = "uea_request", ++ .file = "drivers/usb/atm/ueagle-atm.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001040_hash = { ++ .next = NULL, ++ .name = "uea_send_modem_cmd", ++ .file = "drivers/usb/atm/ueagle-atm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001041_hash = { ++ .next = NULL, ++ .name = "uhci_debug_read", ++ .file = "drivers/usb/host/uhci-debug.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001042_hash = { ++ .next = NULL, ++ .name = "uio_read", ++ .file = "drivers/uio/uio.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001043_hash = { ++ .next = NULL, ++ .name = "uio_write", ++ .file = "drivers/uio/uio.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001044_hash = { ++ .next = NULL, ++ .name = "us122l_ctl_msg", ++ .file = "sound/usb/usx2y/us122l.c", ++ .param8 = 1, ++}; ++ ++struct size_overflow_hash _001045_hash = { ++ .next = NULL, ++ .name = "usbdev_read", ++ .file = "drivers/usb/core/devio.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001046_hash = { ++ .next = NULL, ++ .name = "usblp_read", ++ .file = "drivers/usb/class/usblp.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001047_hash = { ++ .next = NULL, ++ .name = "usblp_write", ++ .file = "drivers/usb/class/usblp.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001048_hash = { ++ .next = NULL, ++ .name = "usbtmc_read", ++ .file = "drivers/usb/class/usbtmc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001049_hash = { ++ .next = NULL, ++ .name = "usbtmc_write", ++ .file = "drivers/usb/class/usbtmc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001050_hash = { ++ .next = NULL, ++ .name = "usbvideo_v4l_read", ++ .file = "drivers/media/video/usbvideo/usbvideo.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001051_hash = { ++ .next = NULL, ++ .name = "usbvision_v4l2_read", ++ .file = "drivers/media/video/usbvision/usbvision-video.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001052_hash = { ++ .next = NULL, ++ .name = "uvc_alloc_buffers", ++ .file = "drivers/media/video/uvc/uvc_queue.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001053_hash = { ++ .next = NULL, ++ .name = "uvc_simplify_fraction", ++ .file = "drivers/media/video/uvc/uvc_driver.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001054_hash = { ++ .next = NULL, ++ .name = "uwb_rc_neh_grok_event", ++ .file = "drivers/uwb/neh.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001055_hash = { ++ .next = NULL, ++ .name = "v4l_stk_read", ++ .file = 
"drivers/media/video/stk-webcam.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001056_hash = { ++ .next = NULL, ++ .name = "vdma_mem_alloc", ++ .file = "arch/x86/include/asm/floppy.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001057_hash = { ++ .next = NULL, ++ .name = "vhci_get_user", ++ .file = "drivers/bluetooth/hci_vhci.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001058_hash = { ++ .next = NULL, ++ .name = "viafb_dfph_proc_write", ++ .file = "drivers/video/via/viafbdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001059_hash = { ++ .next = NULL, ++ .name = "viafb_dfpl_proc_write", ++ .file = "drivers/video/via/viafbdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001060_hash = { ++ .next = NULL, ++ .name = "viafb_dvp0_proc_write", ++ .file = "drivers/video/via/viafbdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001061_hash = { ++ .next = NULL, ++ .name = "viafb_dvp1_proc_write", ++ .file = "drivers/video/via/viafbdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001062_hash = { ++ .next = NULL, ++ .name = "viafb_vt1636_proc_write", ++ .file = "drivers/video/via/viafbdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001063_hash = { ++ .next = NULL, ++ .name = "vicam_read", ++ .file = "drivers/media/video/usbvideo/vicam.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001064_hash = { ++ .next = NULL, ++ .name = "__videobuf_alloc", ++ .file = "drivers/media/video/videobuf-vmalloc.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001065_hash = { ++ .next = NULL, ++ .name = "__videobuf_alloc", ++ .file = "drivers/media/video/videobuf-dma-sg.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001066_hash = { ++ .next = NULL, ++ .name = "__videobuf_copy_to_user", ++ .file = "drivers/media/video/videobuf-dma-sg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001067_hash = { ++ .next = NULL, ++ .name = "__videobuf_copy_to_user", ++ .file = "drivers/media/video/videobuf-vmalloc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001068_hash = { ++ .next = NULL, ++ .name = "vlsi_alloc_ring", ++ .file = "drivers/net/irda/vlsi_ir.c", ++ .param3 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001070_hash = { ++ .next = NULL, ++ .name = "vol_cdev_direct_write", ++ .file = "drivers/mtd/ubi/cdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001071_hash = { ++ .next = NULL, ++ .name = "vol_cdev_read", ++ .file = "drivers/mtd/ubi/cdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001072_hash = { ++ .next = NULL, ++ .name = "vring_add_indirect", ++ .file = "drivers/virtio/virtio_ring.c", ++ .param3 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001074_hash = { ++ .next = NULL, ++ .name = "vring_new_virtqueue", ++ .file = "include/linux/virtio_ring.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001075_hash = { ++ .next = NULL, ++ .name = "vstusb_read", ++ .file = "drivers/usb/misc/vstusb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001076_hash = { ++ .next = NULL, ++ .name = "vstusb_write", ++ .file = "drivers/usb/misc/vstusb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001077_hash = { ++ .next = NULL, ++ .name = "__vxge_hw_channel_allocate", ++ .file = "drivers/net/vxge/vxge-config.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001078_hash = { ++ .next = NULL, ++ .name = "vxge_os_dma_malloc", ++ .file = "drivers/net/vxge/vxge-config.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash 
_001079_hash = { ++ .next = NULL, ++ .name = "vxge_os_dma_malloc_async", ++ .file = "drivers/net/vxge/vxge-config.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001080_hash = { ++ .next = NULL, ++ .name = "w9966_v4l_read", ++ .file = "drivers/media/video/w9966.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001081_hash = { ++ .next = NULL, ++ .name = "w9968cf_read", ++ .file = "drivers/media/video/w9968cf.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001082_hash = { ++ .next = NULL, ++ .name = "waiters_read", ++ .file = "fs/dlm/debug_fs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001083_hash = { ++ .next = NULL, ++ .name = "wa_nep_queue", ++ .file = "drivers/usb/wusbcore/wa-nep.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001084_hash = { ++ .next = NULL, ++ .name = "__wa_xfer_setup_segs", ++ .file = "drivers/usb/wusbcore/wa-xfer.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001085_hash = { ++ .next = NULL, ++ .name = "wdm_read", ++ .file = "drivers/usb/class/cdc-wdm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001086_hash = { ++ .next = NULL, ++ .name = "wdm_write", ++ .file = "drivers/usb/class/cdc-wdm.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001087_hash = { ++ .next = NULL, ++ .name = "wep_addr_key_count_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001088_hash = { ++ .next = NULL, ++ .name = "wep_addr_key_count_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001089_hash = { ++ .next = NULL, ++ .name = "wep_decrypt_fail_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001090_hash = { ++ .next = NULL, ++ .name = "wep_decrypt_fail_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001091_hash = { ++ .next = NULL, ++ .name = "wep_default_key_count_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001092_hash = { ++ .next = NULL, ++ .name = "wep_default_key_count_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001093_hash = { ++ .next = NULL, ++ .name = "wep_interrupt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001094_hash = { ++ .next = NULL, ++ .name = "wep_interrupt_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001095_hash = { ++ .next = NULL, ++ .name = "wep_iv_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001096_hash = { ++ .next = NULL, ++ .name = "wep_key_not_found_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001097_hash = { ++ .next = NULL, ++ .name = "wep_key_not_found_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001098_hash = { ++ .next = NULL, ++ .name = "wep_packets_read", ++ .file = "drivers/net/wireless/wl12xx/wl1271_debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001099_hash = { ++ .next = NULL, ++ .name = "wep_packets_read", ++ .file = "drivers/net/wireless/wl12xx/wl1251_debugfs.c", ++ .param3 = 1, ++}; ++ 
++struct size_overflow_hash _001100_hash = { ++ .next = NULL, ++ .name = "wpan_phy_alloc", ++ .file = "include/net/wpan-phy.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001101_hash = { ++ .next = NULL, ++ .name = "write_essid", ++ .file = "drivers/net/wireless/ray_cs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001102_hash = { ++ .next = NULL, ++ .name = "write_flush", ++ .file = "net/sunrpc/cache.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001103_hash = { ++ .next = NULL, ++ .name = "write_int", ++ .file = "drivers/net/wireless/ray_cs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001104_hash = { ++ .next = NULL, ++ .name = "write_rio", ++ .file = "drivers/usb/misc/rio500.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001105_hash = { ++ .next = NULL, ++ .name = "wusb_ccm_mac", ++ .file = "drivers/usb/wusbcore/crypto.c", ++ .param7 = 1, ++}; ++ ++struct size_overflow_hash _001106_hash = { ++ .next = NULL, ++ .name = "xfs_attrmulti_attr_set", ++ .file = "fs/xfs/linux-2.6/xfs_ioctl.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001107_hash = { ++ .next = NULL, ++ .name = "xfs_handle_to_dentry", ++ .file = "fs/xfs/linux-2.6/xfs_ioctl.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001108_hash = { ++ .next = NULL, ++ .name = "xprt_rdma_allocate", ++ .file = "net/sunrpc/xprtrdma/transport.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001109_hash = { ++ .next = NULL, ++ .name = "xt_alloc_table_info", ++ .file = "include/linux/netfilter/x_tables.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001110_hash = { ++ .next = NULL, ++ .name = "zc0301_read", ++ .file = "drivers/media/video/zc0301/zc0301_core.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001111_hash = { ++ .next = NULL, ++ .name = "zd_ioread32v_locked", ++ .file = "drivers/net/wireless/zd1211rw/zd_chip.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001112_hash = { ++ .next = NULL, ++ .name = "_zd_iowrite32v_locked", ++ .file = "drivers/net/wireless/zd1211rw/zd_chip.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001113_hash = { ++ .next = NULL, ++ .name = "zd_usb_ioread16v", ++ .file = "drivers/net/wireless/zd1211rw/zd_usb.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001114_hash = { ++ .next = NULL, ++ .name = "zd_usb_iowrite16v", ++ .file = "drivers/net/wireless/zd1211rw/zd_usb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001115_hash = { ++ .next = NULL, ++ .name = "zd_usb_read_fw", ++ .file = "drivers/net/wireless/zd1211rw/zd_usb.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001116_hash = { ++ .next = NULL, ++ .name = "zd_usb_rfwrite", ++ .file = "drivers/net/wireless/zd1211rw/zd_usb.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001117_hash = { ++ .next = NULL, ++ .name = "zoran_write", ++ .file = "drivers/media/video/zoran/zoran_procfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001118_hash = { ++ .next = NULL, ++ .name = "agp_create_user_memory", ++ .file = "drivers/char/agp/generic.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001119_hash = { ++ .next = NULL, ++ .name = "alloc_targets", ++ .file = "drivers/md/dm-table.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001120_hash = { ++ .next = NULL, ++ .name = "aoechr_write", ++ .file = "drivers/block/aoe/aoechr.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001121_hash = { ++ .next = NULL, ++ .name = "atomic_read_file", ++ .file = 
"arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001122_hash = { ++ .next = NULL, ++ .name = "bm_entry_write", ++ .file = "fs/binfmt_misc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001123_hash = { ++ .next = NULL, ++ .name = "bm_init", ++ .file = "lib/ts_bm.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001124_hash = { ++ .next = NULL, ++ .name = "bm_register_write", ++ .file = "fs/binfmt_misc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001125_hash = { ++ .next = NULL, ++ .name = "bm_status_write", ++ .file = "fs/binfmt_misc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001126_hash = { ++ .next = NULL, ++ .name = "cache_downcall", ++ .file = "net/sunrpc/cache.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001127_hash = { ++ .next = NULL, ++ .name = "cache_slow_downcall", ++ .file = "net/sunrpc/cache.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001128_hash = { ++ .next = NULL, ++ .name = "cafe_v4l_read", ++ .file = "drivers/media/video/cafe_ccic.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001129_hash = { ++ .next = NULL, ++ .name = "configfs_write_file", ++ .file = "fs/configfs/file.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001130_hash = { ++ .next = NULL, ++ .name = "cpu_type_read", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001131_hash = { ++ .next = NULL, ++ .name = "csr1212_new_descriptor_leaf", ++ .file = "drivers/ieee1394/csr1212.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001132_hash = { ++ .next = NULL, ++ .name = "cx18_read", ++ .file = "drivers/media/video/cx18/cx18-fileops.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001133_hash = { ++ .next = NULL, ++ .name = "cxio_init_resource_fifo", ++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001134_hash = { ++ .next = NULL, ++ .name = "cxio_init_resource_fifo_random", ++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001135_hash = { ++ .next = NULL, ++ .name = "dataflash_read_fact_otp", ++ .file = "drivers/mtd/devices/mtd_dataflash.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001136_hash = { ++ .next = NULL, ++ .name = "dataflash_read_user_otp", ++ .file = "drivers/mtd/devices/mtd_dataflash.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001137_hash = { ++ .next = NULL, ++ .name = "depth_read", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001138_hash = { ++ .next = NULL, ++ .name = "depth_write", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001139_hash = { ++ .next = NULL, ++ .name = "dev_irnet_write", ++ .file = "net/irda/irnet/irnet_ppp.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001140_hash = { ++ .next = NULL, ++ .name = "dev_write", ++ .file = "sound/oss/msnd_pinnacle.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001141_hash = { ++ .next = NULL, ++ .name = "do_dccp_setsockopt", ++ .file = "net/dccp/proto.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001142_hash = { ++ .next = NULL, ++ .name = "dvb_audio_write", ++ .file = "drivers/media/dvb/ttpci/av7110_av.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001143_hash = 
{ ++ .next = NULL, ++ .name = "dvb_demux_do_ioctl", ++ .file = "drivers/media/dvb/dvb-core/dmxdev.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001144_hash = { ++ .next = NULL, ++ .name = "dvb_dvr_do_ioctl", ++ .file = "drivers/media/dvb/dvb-core/dmxdev.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001145_hash = { ++ .next = NULL, ++ .name = "dvb_video_write", ++ .file = "drivers/media/dvb/ttpci/av7110_av.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001146_hash = { ++ .next = NULL, ++ .name = "ecryptfs_decode_and_decrypt_filename", ++ .file = "fs/ecryptfs/crypto.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001147_hash = { ++ .next = NULL, ++ .name = "ecryptfs_encrypt_and_encode_filename", ++ .file = "fs/ecryptfs/crypto.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001148_hash = { ++ .next = NULL, ++ .name = "enable_read", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001149_hash = { ++ .next = NULL, ++ .name = "enable_write", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001150_hash = { ++ .next = NULL, ++ .name = "fallback_on_nodma_alloc", ++ .file = "drivers/block/floppy.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001151_hash = { ++ .next = NULL, ++ .name = "__feat_register_sp", ++ .file = "net/dccp/feat.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001152_hash = { ++ .next = NULL, ++ .name = "frame_alloc", ++ .file = "drivers/media/video/gspca/gspca.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001153_hash = { ++ .next = NULL, ++ .name = "fsm_init", ++ .file = "lib/ts_fsm.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001154_hash = { ++ .next = NULL, ++ .name = "garmin_read_process", ++ .file = "drivers/usb/serial/garmin_gps.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001155_hash = { ++ .next = NULL, ++ .name = "garp_request_join", ++ .file = "include/net/garp.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001156_hash = { ++ .next = NULL, ++ .name = "hpsb_iso_recv_init", ++ .file = "drivers/ieee1394/iso.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001157_hash = { ++ .next = NULL, ++ .name = "hpsb_iso_xmit_init", ++ .file = "drivers/ieee1394/iso.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001158_hash = { ++ .next = NULL, ++ .name = "__hwahc_op_set_gtk", ++ .file = "drivers/usb/host/hwa-hc.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001159_hash = { ++ .next = NULL, ++ .name = "__hwahc_op_set_ptk", ++ .file = "drivers/usb/host/hwa-hc.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001160_hash = { ++ .next = NULL, ++ .name = "ib_send_cm_drep", ++ .file = "include/rdma/ib_cm.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001161_hash = { ++ .next = NULL, ++ .name = "ib_send_cm_mra", ++ .file = "include/rdma/ib_cm.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001162_hash = { ++ .next = NULL, ++ .name = "ib_send_cm_rtu", ++ .file = "include/rdma/ib_cm.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001163_hash = { ++ .next = NULL, ++ .name = "ieee80211_bss_info_update", ++ .file = "net/mac80211/scan.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001164_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_aid", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001165_hash = { 
++ .next = NULL, ++ .name = "ieee80211_if_read_auto_open_plinks", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001166_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_bssid", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001167_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_capab", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001168_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshConfirmTimeout", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001169_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshHoldingTimeout", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001170_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshHWMPactivePathTimeout", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001171_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshHWMPmaxPREQretries", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001172_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001173_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshHWMPpreqMinInterval", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001174_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshMaxPeerLinks", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001175_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshMaxRetries", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001176_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshRetryTimeout", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001177_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dot11MeshTTL", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001178_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dropped_frames_no_route", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001179_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dropped_frames_ttl", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001180_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_drop_unencrypted", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001181_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_dtim_count", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001182_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_estab_plinks", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001183_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_force_unicast_rateidx", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001184_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_fwded_frames", ++ .file = 
"net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001185_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_fwded_mcast", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001186_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_fwded_unicast", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001187_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_max_ratectrl_rateidx", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001188_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_min_discovery_timeout", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001189_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_num_buffered_multicast", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001190_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_num_sta_ps", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001191_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_path_refresh_time", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001192_hash = { ++ .next = NULL, ++ .name = "ieee80211_if_read_peer", ++ .file = "net/mac80211/debugfs_netdev.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001193_hash = { ++ .next = NULL, ++ .name = "init_exception_table", ++ .file = "drivers/md/dm-snap.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001194_hash = { ++ .next = NULL, ++ .name = "init_tid_tabs", ++ .file = "drivers/net/cxgb3/cxgb3_offload.c", ++ .param2 = 1, ++ .param4 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001197_hash = { ++ .next = NULL, ++ .name = "ivtv_read", ++ .file = "drivers/media/video/ivtv/ivtv-fileops.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001198_hash = { ++ .next = NULL, ++ .name = "kmem_realloc", ++ .file = "fs/xfs/linux-2.6/kmem.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001199_hash = { ++ .next = NULL, ++ .name = "kmem_zalloc", ++ .file = "fs/xfs/linux-2.6/kmem.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001200_hash = { ++ .next = NULL, ++ .name = "kmp_init", ++ .file = "lib/ts_kmp.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001201_hash = { ++ .next = NULL, ++ .name = "lbs_bcnmiss_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001202_hash = { ++ .next = NULL, ++ .name = "lbs_failcount_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001203_hash = { ++ .next = NULL, ++ .name = "lbs_highrssi_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001204_hash = { ++ .next = NULL, ++ .name = "lbs_highsnr_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001205_hash = { ++ .next = NULL, ++ .name = "lbs_lowrssi_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001206_hash = { ++ .next = NULL, ++ .name = "lbs_lowsnr_read", ++ .file = "drivers/net/wireless/libertas/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001207_hash = { ++ .next = NULL, ++ .name = "nfs_flush_one", ++ 
.file = "fs/nfs/write.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001208_hash = { ++ .next = &_000671_hash, ++ .name = "nfs_pagein_one", ++ .file = "fs/nfs/read.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001209_hash = { ++ .next = NULL, ++ .name = "nsm_get_handle", ++ .file = "include/linux/lockd/lockd.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001210_hash = { ++ .next = NULL, ++ .name = "ntfs_copy_from_user_iovec", ++ .file = "fs/ntfs/file.c", ++ .param3 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001212_hash = { ++ .next = NULL, ++ .name = "ntfs_file_buffered_write", ++ .file = "fs/ntfs/file.c", ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001213_hash = { ++ .next = NULL, ++ .name = "ntfs_malloc_nofs", ++ .file = "fs/ntfs/malloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001214_hash = { ++ .next = NULL, ++ .name = "ntfs_malloc_nofs_nofail", ++ .file = "fs/ntfs/malloc.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001215_hash = { ++ .next = NULL, ++ .name = "ocfs2_control_message", ++ .file = "fs/ocfs2/stack_user.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001216_hash = { ++ .next = NULL, ++ .name = "orinoco_add_extscan_result", ++ .file = "drivers/net/wireless/orinoco/scan.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001217_hash = { ++ .next = NULL, ++ .name = "play_iframe", ++ .file = "drivers/media/dvb/ttpci/av7110_av.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001218_hash = { ++ .next = NULL, ++ .name = "pointer_size_read", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001219_hash = { ++ .next = NULL, ++ .name = "proc_write_brn", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001220_hash = { ++ .next = NULL, ++ .name = "proc_write_disp", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001221_hash = { ++ .next = NULL, ++ .name = "proc_write_lcd", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001222_hash = { ++ .next = NULL, ++ .name = "proc_write_ledd", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001223_hash = { ++ .next = NULL, ++ .name = "qcam_read", ++ .file = "drivers/media/video/c-qcam.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001224_hash = { ++ .next = NULL, ++ .name = "scsi_tgt_kspace_exec", ++ .file = "drivers/scsi/scsi_tgt_lib.c", ++ .param8 = 1, ++}; ++ ++struct size_overflow_hash _001225_hash = { ++ .next = NULL, ++ .name = "sctp_sendmsg", ++ .file = "net/sctp/socket.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001226_hash = { ++ .next = NULL, ++ .name = "sctp_setsockopt", ++ .file = "net/sctp/socket.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001227_hash = { ++ .next = NULL, ++ .name = "snd_cs4281_BA0_read", ++ .file = "sound/pci/cs4281.c", ++ .param5 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001229_hash = { ++ .next = NULL, ++ .name = "snd_cs4281_BA1_read", ++ .file = "sound/pci/cs4281.c", ++ .param5 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001231_hash = { ++ .next = NULL, ++ .name = "snd_cs46xx_io_read", ++ .file = "sound/pci/cs46xx/cs46xx_lib.c", ++ .param5 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001233_hash = { ++ .next = NULL, ++ .name = "snd_gus_dram_read", ++ 
.file = "include/sound/gus.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001234_hash = { ++ .next = NULL, ++ .name = "snd_gus_dram_write", ++ .file = "include/sound/gus.h", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001235_hash = { ++ .next = NULL, ++ .name = "snd_pcm_oss_read", ++ .file = "sound/core/oss/pcm_oss.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001236_hash = { ++ .next = NULL, ++ .name = "snd_pcm_oss_sync1", ++ .file = "sound/core/oss/pcm_oss.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001237_hash = { ++ .next = NULL, ++ .name = "snd_pcm_oss_write", ++ .file = "sound/core/oss/pcm_oss.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001238_hash = { ++ .next = NULL, ++ .name = "snd_rme32_capture_copy", ++ .file = "sound/pci/rme32.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001239_hash = { ++ .next = NULL, ++ .name = "snd_rme32_playback_copy", ++ .file = "sound/pci/rme32.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001240_hash = { ++ .next = NULL, ++ .name = "snd_rme96_capture_copy", ++ .file = "sound/pci/rme96.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001241_hash = { ++ .next = NULL, ++ .name = "snd_rme96_playback_copy", ++ .file = "sound/pci/rme96.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001242_hash = { ++ .next = NULL, ++ .name = "srp_target_alloc", ++ .file = "include/scsi/libsrp.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001243_hash = { ++ .next = NULL, ++ .name = "stats_dot11ACKFailureCount_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001244_hash = { ++ .next = NULL, ++ .name = "stats_dot11FCSErrorCount_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001245_hash = { ++ .next = NULL, ++ .name = "stats_dot11RTSFailureCount_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001246_hash = { ++ .next = NULL, ++ .name = "stats_dot11RTSSuccessCount_read", ++ .file = "net/mac80211/debugfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001247_hash = { ++ .next = NULL, ++ .name = "stk_allocate_buffers", ++ .file = "drivers/media/video/stk-webcam.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001248_hash = { ++ .next = NULL, ++ .name = "timeout_read", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001249_hash = { ++ .next = NULL, ++ .name = "timeout_write", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001250_hash = { ++ .next = NULL, ++ .name = "ulong_read_file", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001251_hash = { ++ .next = NULL, ++ .name = "ulong_write_file", ++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001252_hash = { ++ .next = NULL, ++ .name = "__videobuf_copy_stream", ++ .file = "drivers/media/video/videobuf-dma-sg.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001253_hash = { ++ .next = NULL, ++ .name = "__videobuf_copy_stream", ++ .file = "drivers/media/video/videobuf-vmalloc.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001254_hash = { ++ .next = NULL, ++ .name = "vol_cdev_write", ++ .file = "drivers/mtd/ubi/cdev.c", ++ .param3 = 1, ++}; ++ 
++struct size_overflow_hash _001255_hash = { ++ .next = NULL, ++ .name = "vring_add_buf", ++ .file = "drivers/virtio/virtio_ring.c", ++ .param3 = 1, ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001257_hash = { ++ .next = NULL, ++ .name = "write_led", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001258_hash = { ++ .next = NULL, ++ .name = "wusb_prf", ++ .file = "include/linux/usb/wusb.h", ++ .param7 = 1, ++}; ++ ++struct size_overflow_hash _001259_hash = { ++ .next = NULL, ++ .name = "zd_ioread32v", ++ .file = "drivers/net/wireless/zd1211rw/zd_chip.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001260_hash = { ++ .next = NULL, ++ .name = "agp_generic_alloc_user", ++ .file = "drivers/char/agp/generic.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001261_hash = { ++ .next = NULL, ++ .name = "cache_write", ++ .file = "net/sunrpc/cache.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001262_hash = { ++ .next = NULL, ++ .name = "cx18_read_pos", ++ .file = "drivers/media/video/cx18/cx18-fileops.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001263_hash = { ++ .next = NULL, ++ .name = "cxio_hal_init_resource", ++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c", ++ .param2 = 1, ++ .param7 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001266_hash = { ++ .next = &_000561_hash, ++ .name = "cxio_hal_init_rhdl_resource", ++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001267_hash = { ++ .next = NULL, ++ .name = "dccp_feat_register_sp", ++ .file = "net/dccp/feat.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001269_hash = { ++ .next = NULL, ++ .name = "ivtv_read_pos", ++ .file = "drivers/media/video/ivtv/ivtv-fileops.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001270_hash = { ++ .next = NULL, ++ .name = "kmem_zalloc_greedy", ++ .file = "fs/xfs/linux-2.6/kmem.c", ++ .param2 = 1, ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001272_hash = { ++ .next = NULL, ++ .name = "ocfs2_control_write", ++ .file = "fs/ocfs2/stack_user.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001273_hash = { ++ .next = NULL, ++ .name = "proc_write_bluetooth", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001274_hash = { ++ .next = NULL, ++ .name = "proc_write_mled", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001275_hash = { ++ .next = NULL, ++ .name = "proc_write_tled", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001276_hash = { ++ .next = &_001229_hash, ++ .name = "proc_write_wled", ++ .file = "drivers/platform/x86/asus_acpi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001277_hash = { ++ .next = NULL, ++ .name = "snd_gf1_mem_proc_dump", ++ .file = "sound/isa/gus/gus_mem_proc.c", ++ .param5 = 1, ++ .param6 = 1, ++}; ++ ++struct size_overflow_hash _001279_hash = { ++ .next = NULL, ++ .name = "wusb_prf_256", ++ .file = "include/linux/usb/wusb.h", ++ .param7 = 1, ++}; ++ ++struct size_overflow_hash _001280_hash = { ++ .next = NULL, ++ .name = "wusb_prf_64", ++ .file = "include/linux/usb/wusb.h", ++ .param7 = 1, ++}; ++ ++struct size_overflow_hash _001281_hash = { ++ .next = NULL, ++ .name = "agp_allocate_memory", ++ .file = "include/linux/agp_backend.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001282_hash = { ++ .next = NULL, 
++ .name = "agp_allocate_memory_wrap", ++ .file = "drivers/char/agp/frontend.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001283_hash = { ++ .next = NULL, ++ .name = "drm_agp_allocate_memory", ++ .file = "drivers/gpu/drm/drm_agpsupport.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001284_hash = { ++ .next = NULL, ++ .name = "ttm_agp_populate", ++ .file = "drivers/gpu/drm/ttm/ttm_agp_backend.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001285_hash = { ++ .next = NULL, ++ .name = "drm_agp_bind_pages", ++ .file = "drivers/gpu/drm/drm_agpsupport.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001286_hash = { ++ .next = NULL, ++ .name = "atomic_counters_read", ++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001287_hash = { ++ .next = NULL, ++ .name = "atomic_stats_read", ++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001288_hash = { ++ .next = NULL, ++ .name = "compat_do_arpt_set_ctl", ++ .file = "net/ipv4/netfilter/arp_tables.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001289_hash = { ++ .next = NULL, ++ .name = "compat_do_ip6t_set_ctl", ++ .file = "net/ipv6/netfilter/ip6_tables.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001290_hash = { ++ .next = NULL, ++ .name = "compat_do_ipt_set_ctl", ++ .file = "net/ipv4/netfilter/ip_tables.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001291_hash = { ++ .next = &_001192_hash, ++ .name = "do_arpt_set_ctl", ++ .file = "net/ipv4/netfilter/arp_tables.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001292_hash = { ++ .next = NULL, ++ .name = "do_ip6t_set_ctl", ++ .file = "net/ipv6/netfilter/ip6_tables.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001293_hash = { ++ .next = NULL, ++ .name = "do_ipt_set_ctl", ++ .file = "net/ipv4/netfilter/ip_tables.c", ++ .param4 = 1, ++}; ++ ++struct size_overflow_hash _001294_hash = { ++ .next = NULL, ++ .name = "flash_read", ++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001295_hash = { ++ .next = NULL, ++ .name = "flash_write", ++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001296_hash = { ++ .next = &_000200_hash, ++ .name = "stats_read_ul", ++ .file = "drivers/idle/i7300_idle.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001297_hash = { ++ .next = NULL, ++ .name = "add_numbered_child", ++ .file = "drivers/mfd/twl4030-core.c", ++ .param5 = 1, ++}; ++ ++struct size_overflow_hash _001298_hash = { ++ .next = NULL, ++ .name = "diva_os_malloc", ++ .file = "drivers/isdn/hardware/eicon/platform.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001299_hash = { ++ .next = NULL, ++ .name = "kmalloc_node", ++ .file = "include/linux/slub_def.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001300_hash = { ++ .next = NULL, ++ .name = "pcpu_mem_alloc", ++ .file = "mm/percpu.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001301_hash = { ++ .next = NULL, ++ .name = "xpc_kmalloc_cacheline_aligned", ++ .file = "drivers/misc/sgi-xp/xpc_partition.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001302_hash = { ++ .next = NULL, ++ .name = "xpc_kzalloc_cacheline_aligned", ++ .file = "drivers/misc/sgi-xp/xpc_main.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001303_hash = { ++ .next = NULL, ++ .name = "add_child", ++ .file = "drivers/mfd/twl4030-core.c", ++ 
.param4 = 1, ++}; ++ ++struct size_overflow_hash _001304_hash = { ++ .next = NULL, ++ .name = "kzalloc_node", ++ .file = "include/linux/slab.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001305_hash = { ++ .next = NULL, ++ .name = "__alloc_bootmem_low_node", ++ .file = "include/linux/bootmem.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001306_hash = { ++ .next = NULL, ++ .name = "__alloc_bootmem_node", ++ .file = "include/linux/bootmem.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001307_hash = { ++ .next = NULL, ++ .name = "__alloc_bootmem_node_nopanic", ++ .file = "include/linux/bootmem.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001308_hash = { ++ .next = NULL, ++ .name = "__earlyonly_bootmem_alloc", ++ .file = "mm/sparse-vmemmap.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001309_hash = { ++ .next = &_001059_hash, ++ .name = "pcpu_alloc_bootmem", ++ .file = "arch/x86/kernel/setup_percpu.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001310_hash = { ++ .next = NULL, ++ .name = "pcpu_fc_alloc", ++ .file = "arch/x86/kernel/setup_percpu.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001311_hash = { ++ .next = NULL, ++ .name = "vmemmap_alloc_block", ++ .file = "include/linux/mm.h", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001312_hash = { ++ .next = NULL, ++ .name = "alloc_ebda_hpc", ++ .file = "drivers/pci/hotplug/ibmphp_ebda.c", ++ .param1 = 1, ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001314_hash = { ++ .next = NULL, ++ .name = "do_pages_stat", ++ .file = "mm/migrate.c", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001315_hash = { ++ .next = NULL, ++ .name = "sys_move_pages", ++ .file = "include/linux/syscalls.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001316_hash = { ++ .next = NULL, ++ .name = "copy_from_user", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001317_hash = { ++ .next = NULL, ++ .name = "__copy_from_user_inatomic", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001318_hash = { ++ .next = NULL, ++ .name = "copy_to_user", ++ .file = "arch/x86/include/asm/uaccess_64.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001319_hash = { ++ .next = NULL, ++ .name = "event_enable_read", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001320_hash = { ++ .next = NULL, ++ .name = "event_filter_read", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001321_hash = { ++ .next = NULL, ++ .name = "event_format_read", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001322_hash = { ++ .next = NULL, ++ .name = "ftrace_pid_read", ++ .file = "kernel/trace/ftrace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001323_hash = { ++ .next = NULL, ++ .name = "ftrace_profile_read", ++ .file = "kernel/trace/ftrace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001324_hash = { ++ .next = NULL, ++ .name = "module_alloc_update_bounds_rx", ++ .file = "kernel/module.c", ++ .param1 = 1, ++}; ++ ++struct size_overflow_hash _001325_hash = { ++ .next = NULL, ++ .name = "rb_simple_read", ++ .file = "kernel/trace/ring_buffer.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001326_hash = { ++ .next = NULL, ++ .name = "show_header", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct 
size_overflow_hash _001327_hash = { ++ .next = NULL, ++ .name = "subsystem_filter_read", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001328_hash = { ++ .next = NULL, ++ .name = "sysprof_sample_read", ++ .file = "kernel/trace/trace_sysprof.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001329_hash = { ++ .next = NULL, ++ .name = "system_enable_read", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001330_hash = { ++ .next = NULL, ++ .name = "trace_options_core_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001331_hash = { ++ .next = NULL, ++ .name = "trace_options_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001332_hash = { ++ .next = NULL, ++ .name = "tracing_clock_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001333_hash = { ++ .next = NULL, ++ .name = "tracing_cpumask_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001334_hash = { ++ .next = NULL, ++ .name = "tracing_ctrl_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001335_hash = { ++ .next = NULL, ++ .name = "tracing_entries_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001336_hash = { ++ .next = NULL, ++ .name = "tracing_mark_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001337_hash = { ++ .next = NULL, ++ .name = "tracing_max_lat_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001338_hash = { ++ .next = NULL, ++ .name = "tracing_read_dyn_info", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001339_hash = { ++ .next = NULL, ++ .name = "tracing_readme_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001340_hash = { ++ .next = NULL, ++ .name = "tracing_saved_cmdlines_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001341_hash = { ++ .next = NULL, ++ .name = "tracing_set_trace_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001342_hash = { ++ .next = NULL, ++ .name = "tracing_stats_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001343_hash = { ++ .next = NULL, ++ .name = "tracing_trace_options_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001344_hash = { ++ .next = NULL, ++ .name = "event_enable_write", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001345_hash = { ++ .next = NULL, ++ .name = "event_filter_write", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001346_hash = { ++ .next = NULL, ++ .name = "event_id_read", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001347_hash = { ++ .next = NULL, ++ .name = "ftrace_pid_write", ++ .file = "kernel/trace/ftrace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001348_hash = { ++ .next = NULL, ++ .name = "ftrace_profile_write", ++ .file = "kernel/trace/ftrace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001349_hash = { ++ .next = NULL, ++ .name = "rb_simple_write", ++ 
.file = "kernel/trace/ring_buffer.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001350_hash = { ++ .next = NULL, ++ .name = "subsystem_filter_write", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001351_hash = { ++ .next = NULL, ++ .name = "sysprof_sample_write", ++ .file = "kernel/trace/trace_sysprof.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001352_hash = { ++ .next = NULL, ++ .name = "system_enable_write", ++ .file = "kernel/trace/trace_events.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001353_hash = { ++ .next = NULL, ++ .name = "trace_options_core_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001354_hash = { ++ .next = NULL, ++ .name = "trace_options_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001355_hash = { ++ .next = NULL, ++ .name = "trace_seq_to_user", ++ .file = "include/linux/trace_seq.h", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001356_hash = { ++ .next = NULL, ++ .name = "tracing_buffers_read", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001357_hash = { ++ .next = NULL, ++ .name = "tracing_clock_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001358_hash = { ++ .next = NULL, ++ .name = "tracing_ctrl_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001359_hash = { ++ .next = NULL, ++ .name = "tracing_entries_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001360_hash = { ++ .next = NULL, ++ .name = "tracing_max_lat_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001361_hash = { ++ .next = NULL, ++ .name = "tracing_set_trace_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001362_hash = { ++ .next = NULL, ++ .name = "tracing_trace_options_write", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001363_hash = { ++ .next = NULL, ++ .name = "tstats_write", ++ .file = "kernel/time/timer_stats.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001364_hash = { ++ .next = NULL, ++ .name = "tracing_read_pipe", ++ .file = "kernel/trace/trace.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001365_hash = { ++ .next = NULL, ++ .name = "capi_write", ++ .file = "drivers/isdn/capi/capi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001366_hash = { ++ .next = NULL, ++ .name = "compat_sys_move_pages", ++ .file = "include/linux/compat.h", ++ .param2 = 1, ++}; ++ ++struct size_overflow_hash _001367_hash = { ++ .next = NULL, ++ .name = "cpia_write_proc", ++ .file = "drivers/media/video/cpia.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001368_hash = { ++ .next = NULL, ++ .name = "ipath_get_base_info", ++ .file = "drivers/infiniband/hw/ipath/ipath_file_ops.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001369_hash = { ++ .next = NULL, ++ .name = "options_write", ++ .file = "drivers/misc/sgi-gru/gruprocfs.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001370_hash = { ++ .next = NULL, ++ .name = "um_idi_write", ++ .file = "drivers/isdn/hardware/eicon/divasi.c", ++ .param3 = 1, ++}; ++ ++struct size_overflow_hash _001371_hash = { ++ .next = NULL, ++ .name = "uv_ptc_proc_write", ++ .file = "arch/x86/kernel/tlb_uv.c", ++ .param3 = 1, 
++}; ++ ++struct size_overflow_hash *size_overflow_hash[65536] = { ++ [51363] = &_000001_hash, ++ [20847] = &_000002_hash, ++ [56878] = &_000003_hash, ++ [11151] = &_000004_hash, ++ [4132] = &_000005_hash, ++ [39070] = &_000006_hash, ++ [35447] = &_000008_hash, ++ [47830] = &_000009_hash, ++ [17521] = &_000010_hash, ++ [62174] = &_000011_hash, ++ [41425] = &_000012_hash, ++ [5785] = &_000013_hash, ++ [19960] = &_000014_hash, ++ [26729] = &_000015_hash, ++ [22403] = &_000016_hash, ++ [23258] = &_000017_hash, ++ [55695] = &_000018_hash, ++ [38964] = &_000019_hash, ++ [42680] = &_000020_hash, ++ [28541] = &_000021_hash, ++ [33165] = &_000022_hash, ++ [22394] = &_000023_hash, ++ [49562] = &_000024_hash, ++ [56881] = &_000025_hash, ++ [13870] = &_000026_hash, ++ [11553] = &_000027_hash, ++ [31825] = &_000028_hash, ++ [53378] = &_000029_hash, ++ [62101] = &_000030_hash, ++ [18152] = &_000031_hash, ++ [43692] = &_000032_hash, ++ [37525] = &_000033_hash, ++ [11925] = &_000034_hash, ++ [20558] = &_000035_hash, ++ [44019] = &_000036_hash, ++ [17854] = &_000037_hash, ++ [17830] = &_000038_hash, ++ [34918] = &_000039_hash, ++ [46839] = &_000040_hash, ++ [57930] = &_000041_hash, ++ [65005] = &_000042_hash, ++ [49567] = &_000043_hash, ++ [61874] = &_000044_hash, ++ [22591] = &_000045_hash, ++ [8743] = &_000046_hash, ++ [47136] = &_000047_hash, ++ [6358] = &_000048_hash, ++ [65254] = &_000049_hash, ++ [34878] = &_000051_hash, ++ [49340] = &_000052_hash, ++ [4863] = &_000053_hash, ++ [46220] = &_000054_hash, ++ [20455] = &_000055_hash, ++ [19917] = &_000057_hash, ++ [25140] = &_000058_hash, ++ [34097] = &_000059_hash, ++ [30319] = &_000060_hash, ++ [58131] = &_000061_hash, ++ [9234] = &_000062_hash, ++ [33309] = &_000063_hash, ++ [56319] = &_000064_hash, ++ [21496] = &_000065_hash, ++ [32154] = &_000066_hash, ++ [38330] = &_000067_hash, ++ [30892] = &_000068_hash, ++ [61067] = &_000070_hash, ++ [16496] = &_000071_hash, ++ [40012] = &_000072_hash, ++ [46014] = &_000073_hash, ++ [39600] = &_000074_hash, ++ [32447] = &_000076_hash, ++ [15439] = &_000077_hash, ++ [17932] = &_000078_hash, ++ [26096] = &_000079_hash, ++ [50814] = &_000080_hash, ++ [22598] = &_000081_hash, ++ [48287] = &_000082_hash, ++ [15611] = &_000083_hash, ++ [20304] = &_000084_hash, ++ [54866] = &_000085_hash, ++ [40395] = &_000086_hash, ++ [24124] = &_000087_hash, ++ [63535] = &_000088_hash, ++ [26678] = &_000089_hash, ++ [64800] = &_000090_hash, ++ [14919] = &_000091_hash, ++ [19929] = &_000092_hash, ++ [65246] = &_000094_hash, ++ [43222] = &_000095_hash, ++ [63488] = &_000096_hash, ++ [17984] = &_000097_hash, ++ [26811] = &_000098_hash, ++ [30848] = &_000099_hash, ++ [59960] = &_000100_hash, ++ [19168] = &_000101_hash, ++ [8885] = &_000102_hash, ++ [15627] = &_000103_hash, ++ [59140] = &_000104_hash, ++ [24633] = &_000105_hash, ++ [21622] = &_000106_hash, ++ [803] = &_000107_hash, ++ [21909] = &_000111_hash, ++ [63679] = &_000112_hash, ++ [57538] = &_000113_hash, ++ [14329] = &_000114_hash, ++ [42442] = &_000115_hash, ++ [23031] = &_000116_hash, ++ [40663] = &_000117_hash, ++ [31235] = &_000118_hash, ++ [48207] = &_000119_hash, ++ [51180] = &_000120_hash, ++ [41364] = &_000121_hash, ++ [24173] = &_000122_hash, ++ [18248] = &_000123_hash, ++ [27992] = &_000124_hash, ++ [9286] = &_000125_hash, ++ [49517] = &_000126_hash, ++ [4732] = &_000129_hash, ++ [36031] = &_000130_hash, ++ [33469] = &_000131_hash, ++ [38783] = &_000132_hash, ++ [19672] = &_000133_hash, ++ [39565] = &_000134_hash, ++ [21877] = &_000135_hash, ++ [21498] = 
&_000136_hash, ++ [53059] = &_000137_hash, ++ [46645] = &_000139_hash, ++ [37308] = &_000141_hash, ++ [59973] = &_000142_hash, ++ [35895] = &_000143_hash, ++ [13332] = &_000144_hash, ++ [3070] = &_000145_hash, ++ [36665] = &_000146_hash, ++ [12413] = &_000147_hash, ++ [27279] = &_000148_hash, ++ [61023] = &_000149_hash, ++ [44774] = &_000150_hash, ++ [14479] = &_000151_hash, ++ [45702] = &_000152_hash, ++ [5533] = &_000153_hash, ++ [29186] = &_000154_hash, ++ [26311] = &_000155_hash, ++ [40182] = &_000156_hash, ++ [50505] = &_000157_hash, ++ [59061] = &_000158_hash, ++ [40371] = &_000159_hash, ++ [6293] = &_000160_hash, ++ [60587] = &_000161_hash, ++ [8181] = &_000162_hash, ++ [27451] = &_000163_hash, ++ [49699] = &_000164_hash, ++ [41172] = &_000165_hash, ++ [3315] = &_000166_hash, ++ [37550] = &_000167_hash, ++ [6678] = &_000168_hash, ++ [54861] = &_000169_hash, ++ [38306] = &_000170_hash, ++ [14153] = &_000171_hash, ++ [23065] = &_000172_hash, ++ [18156] = &_000173_hash, ++ [18418] = &_000174_hash, ++ [50241] = &_000175_hash, ++ [22498] = &_000176_hash, ++ [10991] = &_000177_hash, ++ [40026] = &_000178_hash, ++ [30445] = &_000179_hash, ++ [57691] = &_000180_hash, ++ [45647] = &_000181_hash, ++ [52008] = &_000182_hash, ++ [58758] = &_000183_hash, ++ [18393] = &_000184_hash, ++ [29375] = &_000186_hash, ++ [37192] = &_000187_hash, ++ [27286] = &_000188_hash, ++ [11860] = &_000190_hash, ++ [52928] = &_000191_hash, ++ [46714] = &_000192_hash, ++ [45873] = &_000193_hash, ++ [22271] = &_000194_hash, ++ [46247] = &_000195_hash, ++ [33246] = &_000196_hash, ++ [58508] = &_000197_hash, ++ [47399] = &_000201_hash, ++ [20494] = &_000202_hash, ++ [9483] = &_000203_hash, ++ [20854] = &_000204_hash, ++ [52341] = &_000205_hash, ++ [44884] = &_000206_hash, ++ [53533] = &_000207_hash, ++ [52267] = &_000208_hash, ++ [4065] = &_000209_hash, ++ [2115] = &_000210_hash, ++ [44017] = &_000212_hash, ++ [24951] = &_000213_hash, ++ [13495] = &_000214_hash, ++ [12988] = &_000215_hash, ++ [55227] = &_000216_hash, ++ [47762] = &_000218_hash, ++ [23349] = &_000219_hash, ++ [17613] = &_000220_hash, ++ [5943] = &_000221_hash, ++ [51344] = &_000222_hash, ++ [39714] = &_000223_hash, ++ [52037] = &_000224_hash, ++ [53757] = &_000225_hash, ++ [5994] = &_000226_hash, ++ [13467] = &_000227_hash, ++ [55732] = &_000228_hash, ++ [19995] = &_000229_hash, ++ [51694] = &_000230_hash, ++ [18402] = &_000231_hash, ++ [25256] = &_000232_hash, ++ [5456] = &_000233_hash, ++ [58261] = &_000234_hash, ++ [24792] = &_000235_hash, ++ [39989] = &_000236_hash, ++ [55340] = &_000237_hash, ++ [52501] = &_000238_hash, ++ [12475] = &_000239_hash, ++ [52921] = &_000240_hash, ++ [19120] = &_000241_hash, ++ [14355] = &_000242_hash, ++ [30563] = &_000243_hash, ++ [57776] = &_000244_hash, ++ [21956] = &_000245_hash, ++ [47450] = &_000246_hash, ++ [44818] = &_000247_hash, ++ [50616] = &_000248_hash, ++ [51238] = &_000249_hash, ++ [49299] = &_000250_hash, ++ [2796] = &_000251_hash, ++ [4190] = &_000252_hash, ++ [11548] = &_000253_hash, ++ [60370] = &_000254_hash, ++ [35863] = &_000255_hash, ++ [31869] = &_000256_hash, ++ [54595] = &_000257_hash, ++ [2808] = &_000258_hash, ++ [24656] = &_000259_hash, ++ [895] = &_000260_hash, ++ [32809] = &_000261_hash, ++ [55621] = &_000262_hash, ++ [1733] = &_000263_hash, ++ [46740] = &_000264_hash, ++ [36069] = &_000265_hash, ++ [26020] = &_000266_hash, ++ [63875] = &_000267_hash, ++ [58608] = &_000268_hash, ++ [8919] = &_000269_hash, ++ [59497] = &_000270_hash, ++ [34782] = &_000271_hash, ++ [40998] = &_000272_hash, ++ 
[33328] = &_000273_hash, ++ [17866] = &_000274_hash, ++ [38741] = &_000275_hash, ++ [53939] = &_000276_hash, ++ [14658] = &_000277_hash, ++ [42465] = &_000278_hash, ++ [43616] = &_000279_hash, ++ [46818] = &_000280_hash, ++ [16775] = &_000281_hash, ++ [41393] = &_000282_hash, ++ [10532] = &_000283_hash, ++ [50366] = &_000284_hash, ++ [33324] = &_000285_hash, ++ [58154] = &_000286_hash, ++ [38200] = &_000287_hash, ++ [59315] = &_000288_hash, ++ [22017] = &_000289_hash, ++ [34248] = &_000290_hash, ++ [27251] = &_000291_hash, ++ [7461] = &_000292_hash, ++ [44596] = &_000293_hash, ++ [45157] = &_000294_hash, ++ [55069] = &_000295_hash, ++ [29452] = &_000296_hash, ++ [31885] = &_000297_hash, ++ [20325] = &_000298_hash, ++ [18158] = &_000299_hash, ++ [55856] = &_000300_hash, ++ [36032] = &_000301_hash, ++ [52057] = &_000302_hash, ++ [12463] = &_000303_hash, ++ [30616] = &_000304_hash, ++ [12231] = &_000305_hash, ++ [53530] = &_000306_hash, ++ [32388] = &_000307_hash, ++ [16183] = &_000308_hash, ++ [12977] = &_000309_hash, ++ [58677] = &_000310_hash, ++ [11812] = &_000311_hash, ++ [1122] = &_000312_hash, ++ [31013] = &_000313_hash, ++ [42591] = &_000314_hash, ++ [58348] = &_000315_hash, ++ [11595] = &_000316_hash, ++ [15277] = &_000317_hash, ++ [4811] = &_000318_hash, ++ [34231] = &_000319_hash, ++ [42017] = &_000320_hash, ++ [17504] = &_000321_hash, ++ [3089] = &_000322_hash, ++ [46548] = &_000325_hash, ++ [64933] = &_000326_hash, ++ [42300] = &_000327_hash, ++ [60749] = &_000328_hash, ++ [16865] = &_000329_hash, ++ [2448] = &_000330_hash, ++ [11365] = &_000331_hash, ++ [25620] = &_000332_hash, ++ [32935] = &_000333_hash, ++ [60193] = &_000335_hash, ++ [14396] = &_000336_hash, ++ [46395] = &_000337_hash, ++ [24339] = &_000338_hash, ++ [41996] = &_000339_hash, ++ [7371] = &_000340_hash, ++ [59633] = &_000341_hash, ++ [55203] = &_000342_hash, ++ [16534] = &_000344_hash, ++ [21401] = &_000345_hash, ++ [20788] = &_000346_hash, ++ [39232] = &_000347_hash, ++ [60778] = &_000348_hash, ++ [1442] = &_000349_hash, ++ [18162] = &_000350_hash, ++ [65006] = &_000351_hash, ++ [11523] = &_000352_hash, ++ [29207] = &_000353_hash, ++ [18071] = &_000354_hash, ++ [7601] = &_000355_hash, ++ [26260] = &_000356_hash, ++ [63065] = &_000357_hash, ++ [18949] = &_000358_hash, ++ [29070] = &_000359_hash, ++ [29891] = &_000360_hash, ++ [41916] = &_000361_hash, ++ [40474] = &_000362_hash, ++ [63551] = &_000363_hash, ++ [11488] = &_000364_hash, ++ [36934] = &_000365_hash, ++ [63193] = &_000366_hash, ++ [17219] = &_000367_hash, ++ [64306] = &_000368_hash, ++ [24071] = &_000369_hash, ++ [50308] = &_000370_hash, ++ [38790] = &_000371_hash, ++ [9838] = &_000372_hash, ++ [18983] = &_000373_hash, ++ [9656] = &_000374_hash, ++ [18950] = &_000375_hash, ++ [59749] = &_000376_hash, ++ [20465] = &_000377_hash, ++ [4765] = &_000378_hash, ++ [16169] = &_000379_hash, ++ [6930] = &_000380_hash, ++ [16926] = &_000381_hash, ++ [35218] = &_000382_hash, ++ [19956] = &_000383_hash, ++ [2133] = &_000384_hash, ++ [44616] = &_000385_hash, ++ [12840] = &_000386_hash, ++ [39020] = &_000387_hash, ++ [4269] = &_000389_hash, ++ [37627] = &_000391_hash, ++ [33555] = &_000392_hash, ++ [25900] = &_000393_hash, ++ [57927] = &_000394_hash, ++ [44626] = &_000396_hash, ++ [57935] = &_000397_hash, ++ [63266] = &_000398_hash, ++ [36063] = &_000399_hash, ++ [21379] = &_000400_hash, ++ [18507] = &_000401_hash, ++ [55719] = &_000402_hash, ++ [31210] = &_000403_hash, ++ [36207] = &_000404_hash, ++ [41770] = &_000405_hash, ++ [11600] = &_000406_hash, ++ [25576] = 
&_000407_hash, ++ [7000] = &_000408_hash, ++ [34187] = &_000409_hash, ++ [58533] = &_000410_hash, ++ [1135] = &_000412_hash, ++ [25613] = &_000413_hash, ++ [9541] = &_000414_hash, ++ [49704] = &_000415_hash, ++ [30577] = &_000416_hash, ++ [35722] = &_000417_hash, ++ [60407] = &_000418_hash, ++ [29465] = &_000419_hash, ++ [46891] = &_000420_hash, ++ [43633] = &_000421_hash, ++ [16196] = &_000422_hash, ++ [9646] = &_000424_hash, ++ [59756] = &_000426_hash, ++ [36702] = &_000427_hash, ++ [52476] = &_000428_hash, ++ [57303] = &_000429_hash, ++ [36747] = &_000430_hash, ++ [33643] = &_000431_hash, ++ [29158] = &_000432_hash, ++ [7978] = &_000433_hash, ++ [25178] = &_000434_hash, ++ [54107] = &_000435_hash, ++ [4829] = &_000436_hash, ++ [20972] = &_000437_hash, ++ [51826] = &_000438_hash, ++ [16413] = &_000439_hash, ++ [39712] = &_000440_hash, ++ [35828] = &_000441_hash, ++ [3106] = &_000442_hash, ++ [34039] = &_000443_hash, ++ [61686] = &_000444_hash, ++ [42407] = &_000445_hash, ++ [6253] = &_000446_hash, ++ [32195] = &_000447_hash, ++ [26197] = &_000448_hash, ++ [58003] = &_000449_hash, ++ [41052] = &_000450_hash, ++ [43632] = &_000451_hash, ++ [12029] = &_000452_hash, ++ [23280] = &_000453_hash, ++ [22834] = &_000454_hash, ++ [4419] = &_000455_hash, ++ [27957] = &_000456_hash, ++ [9540] = &_000457_hash, ++ [27106] = &_000458_hash, ++ [8595] = &_000459_hash, ++ [39979] = &_000462_hash, ++ [41540] = &_000463_hash, ++ [43200] = &_000465_hash, ++ [53864] = &_000466_hash, ++ [33494] = &_000467_hash, ++ [25042] = &_000468_hash, ++ [30281] = &_000469_hash, ++ [38112] = &_000470_hash, ++ [46538] = &_000471_hash, ++ [62594] = &_000472_hash, ++ [8339] = &_000473_hash, ++ [45349] = &_000474_hash, ++ [37865] = &_000475_hash, ++ [45763] = &_000476_hash, ++ [62347] = &_000477_hash, ++ [21644] = &_000478_hash, ++ [18719] = &_000479_hash, ++ [43138] = &_000481_hash, ++ [25654] = &_000482_hash, ++ [60014] = &_000483_hash, ++ [11697] = &_000484_hash, ++ [41271] = &_000485_hash, ++ [48356] = &_000486_hash, ++ [27003] = &_000487_hash, ++ [32464] = &_000488_hash, ++ [44248] = &_000490_hash, ++ [16] = &_000491_hash, ++ [29933] = &_000492_hash, ++ [34359] = &_000493_hash, ++ [3154] = &_000494_hash, ++ [23959] = &_000495_hash, ++ [6724] = &_000496_hash, ++ [54587] = &_000497_hash, ++ [28479] = &_000498_hash, ++ [56583] = &_000499_hash, ++ [64644] = &_000500_hash, ++ [23284] = &_000501_hash, ++ [58325] = &_000502_hash, ++ [61655] = &_000503_hash, ++ [32147] = &_000504_hash, ++ [25649] = &_000505_hash, ++ [47428] = &_000506_hash, ++ [47737] = &_000507_hash, ++ [2987] = &_000508_hash, ++ [34901] = &_000509_hash, ++ [49188] = &_000510_hash, ++ [11582] = &_000511_hash, ++ [25935] = &_000512_hash, ++ [13561] = &_000513_hash, ++ [31880] = &_000514_hash, ++ [47768] = &_000515_hash, ++ [62185] = &_000516_hash, ++ [18341] = &_000517_hash, ++ [4052] = &_000518_hash, ++ [1033] = &_000519_hash, ++ [19576] = &_000520_hash, ++ [48846] = &_000521_hash, ++ [63167] = &_000522_hash, ++ [37223] = &_000523_hash, ++ [55574] = &_000524_hash, ++ [13831] = &_000525_hash, ++ [32374] = &_000526_hash, ++ [56078] = &_000527_hash, ++ [65337] = &_000528_hash, ++ [19329] = &_000529_hash, ++ [61088] = &_000530_hash, ++ [34661] = &_000531_hash, ++ [49367] = &_000532_hash, ++ [64731] = &_000533_hash, ++ [23777] = &_000534_hash, ++ [53828] = &_000535_hash, ++ [59115] = &_000536_hash, ++ [4456] = &_000537_hash, ++ [53442] = &_000538_hash, ++ [17500] = &_000539_hash, ++ [47329] = &_000540_hash, ++ [13534] = &_000541_hash, ++ [36955] = &_000542_hash, ++ 
[9841] = &_000543_hash, ++ [19308] = &_000544_hash, ++ [55652] = &_000545_hash, ++ [7842] = &_000546_hash, ++ [33485] = &_000547_hash, ++ [50750] = &_000548_hash, ++ [44599] = &_000549_hash, ++ [46403] = &_000550_hash, ++ [303] = &_000551_hash, ++ [22960] = &_000552_hash, ++ [10544] = &_000553_hash, ++ [7694] = &_000554_hash, ++ [62082] = &_000555_hash, ++ [57054] = &_000556_hash, ++ [51909] = &_000558_hash, ++ [52239] = &_000559_hash, ++ [24125] = &_000560_hash, ++ [60221] = &_000563_hash, ++ [38542] = &_000564_hash, ++ [45071] = &_000565_hash, ++ [23450] = &_000566_hash, ++ [14295] = &_000567_hash, ++ [25923] = &_000568_hash, ++ [31865] = &_000569_hash, ++ [4764] = &_000570_hash, ++ [10574] = &_000571_hash, ++ [26141] = &_000572_hash, ++ [55766] = &_000573_hash, ++ [22483] = &_000574_hash, ++ [61047] = &_000575_hash, ++ [58978] = &_000576_hash, ++ [47578] = &_000577_hash, ++ [7730] = &_000578_hash, ++ [15904] = &_000579_hash, ++ [25081] = &_000580_hash, ++ [45743] = &_000581_hash, ++ [58830] = &_000582_hash, ++ [59081] = &_000583_hash, ++ [47533] = &_000584_hash, ++ [11305] = &_000585_hash, ++ [56290] = &_000586_hash, ++ [44963] = &_000587_hash, ++ [30026] = &_000588_hash, ++ [56424] = &_000589_hash, ++ [27694] = &_000590_hash, ++ [38583] = &_000591_hash, ++ [20939] = &_000592_hash, ++ [17486] = &_000593_hash, ++ [52548] = &_000594_hash, ++ [18733] = &_000595_hash, ++ [54359] = &_000597_hash, ++ [39434] = &_000598_hash, ++ [3079] = &_000599_hash, ++ [47614] = &_000600_hash, ++ [38186] = &_000601_hash, ++ [10078] = &_000602_hash, ++ [17088] = &_000603_hash, ++ [2737] = &_000604_hash, ++ [13257] = &_000605_hash, ++ [31672] = &_000606_hash, ++ [21293] = &_000607_hash, ++ [7004] = &_000608_hash, ++ [45825] = &_000609_hash, ++ [64368] = &_000610_hash, ++ [11872] = &_000611_hash, ++ [26129] = &_000612_hash, ++ [23921] = &_000613_hash, ++ [5376] = &_000614_hash, ++ [16647] = &_000615_hash, ++ [2422] = &_000616_hash, ++ [44177] = &_000617_hash, ++ [58592] = &_000618_hash, ++ [45474] = &_000619_hash, ++ [63955] = &_000620_hash, ++ [10011] = &_000621_hash, ++ [28522] = &_000622_hash, ++ [44081] = &_000623_hash, ++ [58432] = &_000624_hash, ++ [1994] = &_000625_hash, ++ [20411] = &_000626_hash, ++ [14958] = &_000627_hash, ++ [29215] = &_000628_hash, ++ [42546] = &_000629_hash, ++ [60995] = &_000630_hash, ++ [21469] = &_000631_hash, ++ [7084] = &_000632_hash, ++ [44184] = &_000633_hash, ++ [58601] = &_000634_hash, ++ [32802] = &_000635_hash, ++ [51283] = &_000636_hash, ++ [21432] = &_000637_hash, ++ [7113] = &_000638_hash, ++ [57063] = &_000639_hash, ++ [5800] = &_000640_hash, ++ [45617] = &_000641_hash, ++ [32049] = &_000642_hash, ++ [53922] = &_000643_hash, ++ [49155] = &_000644_hash, ++ [17299] = &_000645_hash, ++ [37940] = &_000646_hash, ++ [31594] = &_000647_hash, ++ [38151] = &_000648_hash, ++ [58827] = &_000649_hash, ++ [34245] = &_000650_hash, ++ [15613] = &_000651_hash, ++ [16046] = &_000652_hash, ++ [50172] = &_000653_hash, ++ [26107] = &_000654_hash, ++ [60543] = &_000655_hash, ++ [56337] = &_000656_hash, ++ [24409] = &_000657_hash, ++ [11732] = &_000658_hash, ++ [65519] = &_000659_hash, ++ [55865] = &_000660_hash, ++ [51480] = &_000661_hash, ++ [2061] = &_000662_hash, ++ [10885] = &_000663_hash, ++ [45913] = &_000664_hash, ++ [8892] = &_000665_hash, ++ [64891] = &_000666_hash, ++ [64537] = &_000667_hash, ++ [55518] = &_000668_hash, ++ [23343] = &_000669_hash, ++ [40783] = &_000670_hash, ++ [24527] = &_000672_hash, ++ [27419] = &_000673_hash, ++ [16439] = &_000674_hash, ++ [13869] = 
&_000675_hash, ++ [53150] = &_000676_hash, ++ [2884] = &_000677_hash, ++ [37954] = &_000678_hash, ++ [10362] = &_000679_hash, ++ [40083] = &_000680_hash, ++ [60499] = &_000681_hash, ++ [62810] = &_000682_hash, ++ [24682] = &_000683_hash, ++ [61793] = &_000684_hash, ++ [53395] = &_000685_hash, ++ [5519] = &_000686_hash, ++ [28637] = &_000687_hash, ++ [51689] = &_000688_hash, ++ [6783] = &_000689_hash, ++ [43312] = &_000690_hash, ++ [2373] = &_000691_hash, ++ [33482] = &_000692_hash, ++ [24886] = &_000693_hash, ++ [12838] = &_000694_hash, ++ [47012] = &_000695_hash, ++ [23691] = &_000696_hash, ++ [37924] = &_000697_hash, ++ [60399] = &_000698_hash, ++ [2312] = &_000699_hash, ++ [10805] = &_000700_hash, ++ [33996] = &_000701_hash, ++ [40856] = &_000702_hash, ++ [63446] = &_000703_hash, ++ [31205] = &_000704_hash, ++ [33100] = &_000705_hash, ++ [40843] = &_000706_hash, ++ [9845] = &_000707_hash, ++ [42227] = &_000708_hash, ++ [13114] = &_000709_hash, ++ [31563] = &_000710_hash, ++ [46922] = &_000711_hash, ++ [65339] = &_000712_hash, ++ [17658] = &_000713_hash, ++ [56374] = &_000714_hash, ++ [2434] = &_000715_hash, ++ [41487] = &_000716_hash, ++ [56728] = &_000717_hash, ++ [48150] = &_000718_hash, ++ [21221] = &_000719_hash, ++ [13905] = &_000720_hash, ++ [9054] = &_000721_hash, ++ [26108] = &_000722_hash, ++ [56819] = &_000723_hash, ++ [56038] = &_000724_hash, ++ [58328] = &_000725_hash, ++ [18609] = &_000726_hash, ++ [64412] = &_000727_hash, ++ [33020] = &_000728_hash, ++ [22639] = &_000729_hash, ++ [6453] = &_000730_hash, ++ [58692] = &_000731_hash, ++ [56471] = &_000732_hash, ++ [3589] = &_000733_hash, ++ [18936] = &_000734_hash, ++ [12558] = &_000735_hash, ++ [3201] = &_000736_hash, ++ [28175] = &_000738_hash, ++ [43888] = &_000740_hash, ++ [32456] = &_000741_hash, ++ [29036] = &_000742_hash, ++ [25603] = &_000743_hash, ++ [17675] = &_000744_hash, ++ [36271] = &_000745_hash, ++ [49814] = &_000746_hash, ++ [51009] = &_000747_hash, ++ [62835] = &_000748_hash, ++ [27139] = &_000749_hash, ++ [17186] = &_000750_hash, ++ [64437] = &_000751_hash, ++ [46825] = &_000752_hash, ++ [31408] = &_000754_hash, ++ [5396] = &_000755_hash, ++ [62247] = &_000756_hash, ++ [7946] = &_000757_hash, ++ [783] = &_000758_hash, ++ [7141] = &_000759_hash, ++ [7083] = &_000760_hash, ++ [15618] = &_000761_hash, ++ [61225] = &_000762_hash, ++ [13163] = &_000763_hash, ++ [8719] = &_000764_hash, ++ [8539] = &_000765_hash, ++ [23949] = &_000766_hash, ++ [32336] = &_000767_hash, ++ [62829] = &_000768_hash, ++ [1238] = &_000769_hash, ++ [8174] = &_000770_hash, ++ [6368] = &_000771_hash, ++ [29170] = &_000772_hash, ++ [9687] = &_000773_hash, ++ [61116] = &_000774_hash, ++ [31681] = &_000775_hash, ++ [22119] = &_000776_hash, ++ [59885] = &_000777_hash, ++ [47789] = &_000778_hash, ++ [5796] = &_000779_hash, ++ [34900] = &_000780_hash, ++ [49189] = &_000781_hash, ++ [48255] = &_000782_hash, ++ [62478] = &_000783_hash, ++ [34446] = &_000784_hash, ++ [52991] = &_000785_hash, ++ [38848] = &_000786_hash, ++ [57265] = &_000787_hash, ++ [11660] = &_000788_hash, ++ [26109] = &_000789_hash, ++ [43762] = &_000790_hash, ++ [57987] = &_000791_hash, ++ [23466] = &_000792_hash, ++ [5083] = &_000793_hash, ++ [1272] = &_000794_hash, ++ [10199] = &_000795_hash, ++ [34666] = &_000796_hash, ++ [49317] = &_000797_hash, ++ [18604] = &_000798_hash, ++ [61509] = &_000799_hash, ++ [36247] = &_000800_hash, ++ [41223] = &_000801_hash, ++ [59766] = &_000802_hash, ++ [34483] = &_000803_hash, ++ [52930] = &_000804_hash, ++ [40728] = &_000805_hash, ++ 
[55145] = &_000806_hash, ++ [45581] = &_000807_hash, ++ [64124] = &_000808_hash, ++ [48878] = &_000809_hash, ++ [63135] = &_000810_hash, ++ [40381] = &_000811_hash, ++ [54732] = &_000812_hash, ++ [45881] = &_000813_hash, ++ [64328] = &_000814_hash, ++ [22326] = &_000815_hash, ++ [8007] = &_000816_hash, ++ [23579] = &_000817_hash, ++ [5226] = &_000818_hash, ++ [12120] = &_000819_hash, ++ [26409] = &_000820_hash, ++ [14269] = &_000821_hash, ++ [32716] = &_000822_hash, ++ [48351] = &_000823_hash, ++ [62638] = &_000824_hash, ++ [33406] = &_000825_hash, ++ [51727] = &_000826_hash, ++ [51386] = &_000827_hash, ++ [58061] = &_000828_hash, ++ [57198] = &_000829_hash, ++ [30748] = &_000830_hash, ++ [60403] = &_000831_hash, ++ [24980] = &_000832_hash, ++ [55896] = &_000833_hash, ++ [57055] = &_000834_hash, ++ [46010] = &_000835_hash, ++ [59996] = &_000836_hash, ++ [45219] = &_000837_hash, ++ [14813] = &_000838_hash, ++ [29240] = &_000839_hash, ++ [42270] = &_000840_hash, ++ [46226] = &_000841_hash, ++ [24710] = &_000842_hash, ++ [51438] = &_000843_hash, ++ [22071] = &_000844_hash, ++ [49735] = &_000845_hash, ++ [63925] = &_000846_hash, ++ [30902] = &_000847_hash, ++ [53089] = &_000848_hash, ++ [18110] = &_000849_hash, ++ [34515] = &_000850_hash, ++ [19756] = &_000851_hash, ++ [5116] = &_000852_hash, ++ [50702] = &_000853_hash, ++ [59565] = &_000854_hash, ++ [61042] = &_000855_hash, ++ [14533] = &_000856_hash, ++ [23006] = &_000857_hash, ++ [4527] = &_000858_hash, ++ [8808] = &_000859_hash, ++ [52383] = &_000860_hash, ++ [30487] = &_000861_hash, ++ [30125] = &_000862_hash, ++ [33237] = &_000863_hash, ++ [40673] = &_000864_hash, ++ [48026] = &_000865_hash, ++ [64033] = &_000866_hash, ++ [13879] = &_000867_hash, ++ [60114] = &_000868_hash, ++ [19472] = &_000869_hash, ++ [33552] = &_000870_hash, ++ [19742] = &_000871_hash, ++ [3675] = &_000872_hash, ++ [15488] = &_000873_hash, ++ [29937] = &_000874_hash, ++ [12146] = &_000875_hash, ++ [26371] = &_000876_hash, ++ [27112] = &_000877_hash, ++ [8601] = &_000878_hash, ++ [1365] = &_000879_hash, ++ [19748] = &_000880_hash, ++ [46546] = &_000881_hash, ++ [64931] = &_000882_hash, ++ [34551] = &_000883_hash, ++ [52870] = &_000884_hash, ++ [17725] = &_000885_hash, ++ [3404] = &_000886_hash, ++ [36863] = &_000887_hash, ++ [51086] = &_000888_hash, ++ [11683] = &_000889_hash, ++ [26066] = &_000890_hash, ++ [39708] = &_000891_hash, ++ [54125] = &_000892_hash, ++ [20700] = &_000893_hash, ++ [6317] = &_000894_hash, ++ [14495] = &_000895_hash, ++ [28910] = &_000896_hash, ++ [18261] = &_000897_hash, ++ [3876] = &_000898_hash, ++ [58057] = &_000899_hash, ++ [29208] = &_000900_hash, ++ [18591] = &_000901_hash, ++ [24373] = &_000902_hash, ++ [45487] = &_000903_hash, ++ [29299] = &_000904_hash, ++ [53920] = &_000905_hash, ++ [25407] = &_000906_hash, ++ [5525] = &_000907_hash, ++ [3531] = &_000908_hash, ++ [25143] = &_000909_hash, ++ [56046] = &_000910_hash, ++ [34693] = &_000911_hash, ++ [48644] = &_000912_hash, ++ [21226] = &_000913_hash, ++ [14051] = &_000914_hash, ++ [7715] = &_000915_hash, ++ [30413] = &_000916_hash, ++ [13681] = &_000917_hash, ++ [6554] = &_000918_hash, ++ [12228] = &_000919_hash, ++ [25497] = &_000920_hash, ++ [52228] = &_000921_hash, ++ [51997] = &_000922_hash, ++ [49069] = &_000923_hash, ++ [26961] = &_000924_hash, ++ [13768] = &_000925_hash, ++ [41838] = &_000926_hash, ++ [3112] = &_000927_hash, ++ [48323] = &_000928_hash, ++ [47783] = &_000929_hash, ++ [62001] = &_000930_hash, ++ [35888] = &_000931_hash, ++ [752] = &_000933_hash, ++ [54718] = 
&_000934_hash, ++ [64177] = &_000935_hash, ++ [57222] = &_000936_hash, ++ [5260] = &_000938_hash, ++ [55517] = &_000939_hash, ++ [18186] = &_000940_hash, ++ [14257] = &_000941_hash, ++ [24596] = &_000942_hash, ++ [5968] = &_000943_hash, ++ [26846] = &_000944_hash, ++ [56097] = &_000945_hash, ++ [6543] = &_000946_hash, ++ [55151] = &_000947_hash, ++ [2999] = &_000949_hash, ++ [3602] = &_000951_hash, ++ [18460] = &_000952_hash, ++ [16521] = &_000953_hash, ++ [16745] = &_000954_hash, ++ [3507] = &_000955_hash, ++ [46802] = &_000956_hash, ++ [58077] = &_000957_hash, ++ [39846] = &_000958_hash, ++ [18629] = &_000959_hash, ++ [2723] = &_000960_hash, ++ [45230] = &_000961_hash, ++ [26941] = &_000962_hash, ++ [4344] = &_000963_hash, ++ [64674] = &_000964_hash, ++ [42551] = &_000965_hash, ++ [63272] = &_000966_hash, ++ [28261] = &_000967_hash, ++ [44694] = &_000968_hash, ++ [8573] = &_000969_hash, ++ [60174] = &_000970_hash, ++ [58458] = &_000971_hash, ++ [98] = &_000972_hash, ++ [47550] = &_000973_hash, ++ [10726] = &_000974_hash, ++ [11965] = &_000975_hash, ++ [38726] = &_000976_hash, ++ [41390] = &_000977_hash, ++ [55348] = &_000978_hash, ++ [28209] = &_000979_hash, ++ [45394] = &_000980_hash, ++ [26613] = &_000981_hash, ++ [29383] = &_000982_hash, ++ [34016] = &_000983_hash, ++ [39519] = &_000984_hash, ++ [27509] = &_000985_hash, ++ [57424] = &_000986_hash, ++ [26695] = &_000987_hash, ++ [21997] = &_000988_hash, ++ [22200] = &_000989_hash, ++ [60666] = &_000990_hash, ++ [22124] = &_000991_hash, ++ [1832] = &_000992_hash, ++ [25283] = &_000993_hash, ++ [10045] = &_000994_hash, ++ [30083] = &_000995_hash, ++ [45719] = &_000996_hash, ++ [3852] = &_000997_hash, ++ [38455] = &_000998_hash, ++ [43361] = &_000999_hash, ++ [52769] = &_001000_hash, ++ [14854] = &_001001_hash, ++ [38176] = &_001002_hash, ++ [29063] = &_001003_hash, ++ [4059] = &_001004_hash, ++ [54060] = &_001005_hash, ++ [4141] = &_001006_hash, ++ [33096] = &_001007_hash, ++ [2417] = &_001008_hash, ++ [1291] = &_001009_hash, ++ [14270] = &_001010_hash, ++ [38500] = &_001011_hash, ++ [2681] = &_001012_hash, ++ [50905] = &_001013_hash, ++ [8021] = &_001014_hash, ++ [6895] = &_001015_hash, ++ [15209] = &_001016_hash, ++ [62825] = &_001017_hash, ++ [10145] = &_001018_hash, ++ [10997] = &_001019_hash, ++ [48159] = &_001020_hash, ++ [2509] = &_001021_hash, ++ [47899] = &_001022_hash, ++ [25367] = &_001023_hash, ++ [44716] = &_001024_hash, ++ [55347] = &_001025_hash, ++ [14518] = &_001026_hash, ++ [33198] = &_001027_hash, ++ [23009] = &_001028_hash, ++ [22607] = &_001029_hash, ++ [13080] = &_001030_hash, ++ [54700] = &_001031_hash, ++ [10939] = &_001032_hash, ++ [25290] = &_001033_hash, ++ [38856] = &_001034_hash, ++ [57273] = &_001035_hash, ++ [2404] = &_001036_hash, ++ [64074] = &_001037_hash, ++ [7538] = &_001038_hash, ++ [19736] = &_001039_hash, ++ [8199] = &_001040_hash, ++ [40711] = &_001041_hash, ++ [47859] = &_001042_hash, ++ [53925] = &_001043_hash, ++ [37305] = &_001044_hash, ++ [18414] = &_001045_hash, ++ [62423] = &_001046_hash, ++ [30371] = &_001047_hash, ++ [14530] = &_001048_hash, ++ [48623] = &_001049_hash, ++ [36302] = &_001050_hash, ++ [12845] = &_001051_hash, ++ [24757] = &_001052_hash, ++ [54706] = &_001053_hash, ++ [27133] = &_001054_hash, ++ [64420] = &_001055_hash, ++ [25414] = &_001056_hash, ++ [64006] = &_001057_hash, ++ [11063] = &_001058_hash, ++ [45648] = &_001060_hash, ++ [21855] = &_001061_hash, ++ [54573] = &_001062_hash, ++ [48239] = &_001063_hash, ++ [18480] = &_001064_hash, ++ [37696] = &_001065_hash, ++ 
[32206] = &_001066_hash, ++ [42686] = &_001067_hash, ++ [6267] = &_001068_hash, ++ [22247] = &_001070_hash, ++ [9440] = &_001071_hash, ++ [54676] = &_001072_hash, ++ [53982] = &_001074_hash, ++ [41203] = &_001075_hash, ++ [21488] = &_001076_hash, ++ [61887] = &_001077_hash, ++ [7763] = &_001078_hash, ++ [37543] = &_001079_hash, ++ [17561] = &_001080_hash, ++ [63958] = &_001081_hash, ++ [28161] = &_001082_hash, ++ [57262] = &_001083_hash, ++ [61071] = &_001084_hash, ++ [20067] = &_001085_hash, ++ [34321] = &_001086_hash, ++ [38080] = &_001087_hash, ++ [56497] = &_001088_hash, ++ [16073] = &_001089_hash, ++ [30392] = &_001090_hash, ++ [14948] = &_001091_hash, ++ [29205] = &_001092_hash, ++ [12395] = &_001093_hash, ++ [30746] = &_001094_hash, ++ [27547] = &_001095_hash, ++ [42558] = &_001096_hash, ++ [61007] = &_001097_hash, ++ [37681] = &_001098_hash, ++ [56128] = &_001099_hash, ++ [22143] = &_001100_hash, ++ [52132] = &_001101_hash, ++ [38029] = &_001102_hash, ++ [64201] = &_001103_hash, ++ [55343] = &_001104_hash, ++ [40624] = &_001105_hash, ++ [38864] = &_001106_hash, ++ [22724] = &_001107_hash, ++ [43091] = &_001108_hash, ++ [33299] = &_001109_hash, ++ [9837] = &_001110_hash, ++ [26478] = &_001111_hash, ++ [57164] = &_001112_hash, ++ [38152] = &_001113_hash, ++ [18701] = &_001114_hash, ++ [5509] = &_001115_hash, ++ [39623] = &_001116_hash, ++ [53012] = &_001117_hash, ++ [50484] = &_001118_hash, ++ [59832] = &_001119_hash, ++ [17662] = &_001120_hash, ++ [25127] = &_001121_hash, ++ [54292] = &_001122_hash, ++ [30642] = &_001123_hash, ++ [39939] = &_001124_hash, ++ [34818] = &_001125_hash, ++ [11111] = &_001126_hash, ++ [64141] = &_001127_hash, ++ [20649] = &_001128_hash, ++ [58877] = &_001129_hash, ++ [13880] = &_001130_hash, ++ [54133] = &_001131_hash, ++ [21149] = &_001132_hash, ++ [62195] = &_001133_hash, ++ [14976] = &_001134_hash, ++ [52701] = &_001135_hash, ++ [29857] = &_001136_hash, ++ [49420] = &_001137_hash, ++ [45897] = &_001138_hash, ++ [15141] = &_001139_hash, ++ [24177] = &_001140_hash, ++ [35883] = &_001141_hash, ++ [51457] = &_001142_hash, ++ [1787] = &_001143_hash, ++ [10135] = &_001144_hash, ++ [952] = &_001145_hash, ++ [53578] = &_001146_hash, ++ [9923] = &_001147_hash, ++ [45249] = &_001148_hash, ++ [52860] = &_001149_hash, ++ [6681] = &_001150_hash, ++ [42360] = &_001151_hash, ++ [63619] = &_001152_hash, ++ [63988] = &_001153_hash, ++ [26378] = &_001154_hash, ++ [7768] = &_001155_hash, ++ [13738] = &_001156_hash, ++ [1480] = &_001157_hash, ++ [43806] = &_001158_hash, ++ [15976] = &_001159_hash, ++ [2732] = &_001160_hash, ++ [2519] = &_001161_hash, ++ [14340] = &_001162_hash, ++ [34772] = &_001163_hash, ++ [36433] = &_001164_hash, ++ [16068] = &_001165_hash, ++ [8929] = &_001166_hash, ++ [31814] = &_001167_hash, ++ [18246] = &_001168_hash, ++ [4932] = &_001169_hash, ++ [46960] = &_001170_hash, ++ [16909] = &_001171_hash, ++ [44429] = &_001172_hash, ++ [62760] = &_001173_hash, ++ [63230] = &_001174_hash, ++ [39532] = &_001175_hash, ++ [24688] = &_001176_hash, ++ [18555] = &_001177_hash, ++ [10719] = &_001178_hash, ++ [1644] = &_001179_hash, ++ [15109] = &_001180_hash, ++ [15787] = &_001181_hash, ++ [54445] = &_001182_hash, ++ [54827] = &_001183_hash, ++ [9488] = &_001184_hash, ++ [12587] = &_001185_hash, ++ [17124] = &_001186_hash, ++ [20681] = &_001187_hash, ++ [40386] = &_001188_hash, ++ [39444] = &_001189_hash, ++ [11290] = &_001190_hash, ++ [51313] = &_001191_hash, ++ [1703] = &_001193_hash, ++ [10828] = &_001194_hash, ++ [28007] = &_001197_hash, ++ [63410] = 
&_001198_hash, ++ [52437] = &_001199_hash, ++ [62762] = &_001200_hash, ++ [35188] = &_001201_hash, ++ [53701] = &_001202_hash, ++ [21195] = &_001203_hash, ++ [49081] = &_001204_hash, ++ [54624] = &_001205_hash, ++ [56081] = &_001206_hash, ++ [45494] = &_001207_hash, ++ [63706] = &_001208_hash, ++ [30644] = &_001209_hash, ++ [63391] = &_001210_hash, ++ [11655] = &_001212_hash, ++ [28229] = &_001213_hash, ++ [22382] = &_001214_hash, ++ [22649] = &_001215_hash, ++ [19761] = &_001216_hash, ++ [26212] = &_001217_hash, ++ [29203] = &_001218_hash, ++ [53604] = &_001219_hash, ++ [57936] = &_001220_hash, ++ [43029] = &_001221_hash, ++ [25452] = &_001222_hash, ++ [61614] = &_001223_hash, ++ [56321] = &_001224_hash, ++ [42691] = &_001225_hash, ++ [62052] = &_001226_hash, ++ [35556] = &_001227_hash, ++ [1019] = &_001231_hash, ++ [28818] = &_001233_hash, ++ [52880] = &_001234_hash, ++ [18895] = &_001235_hash, ++ [857] = &_001236_hash, ++ [45966] = &_001237_hash, ++ [11785] = &_001238_hash, ++ [7736] = &_001239_hash, ++ [4308] = &_001240_hash, ++ [51095] = &_001241_hash, ++ [427] = &_001242_hash, ++ [4021] = &_001243_hash, ++ [54201] = &_001244_hash, ++ [5615] = &_001245_hash, ++ [16234] = &_001246_hash, ++ [51718] = &_001247_hash, ++ [943] = &_001248_hash, ++ [32683] = &_001249_hash, ++ [63492] = &_001250_hash, ++ [14897] = &_001251_hash, ++ [23417] = &_001252_hash, ++ [32777] = &_001253_hash, ++ [38971] = &_001254_hash, ++ [33881] = &_001255_hash, ++ [25586] = &_001257_hash, ++ [58757] = &_001258_hash, ++ [19758] = &_001259_hash, ++ [23829] = &_001260_hash, ++ [26603] = &_001261_hash, ++ [50633] = &_001262_hash, ++ [19952] = &_001263_hash, ++ [7271] = &_001266_hash, ++ [36900] = &_001267_hash, ++ [54095] = &_001269_hash, ++ [46781] = &_001270_hash, ++ [33835] = &_001272_hash, ++ [23157] = &_001273_hash, ++ [6793] = &_001274_hash, ++ [53763] = &_001275_hash, ++ [57144] = &_001276_hash, ++ [24747] = &_001277_hash, ++ [2114] = &_001279_hash, ++ [48424] = &_001280_hash, ++ [61522] = &_001281_hash, ++ [31000] = &_001282_hash, ++ [51062] = &_001283_hash, ++ [46412] = &_001284_hash, ++ [23497] = &_001285_hash, ++ [10019] = &_001286_hash, ++ [5148] = &_001287_hash, ++ [62460] = &_001288_hash, ++ [45174] = &_001289_hash, ++ [31054] = &_001290_hash, ++ [6921] = &_001291_hash, ++ [22158] = &_001292_hash, ++ [18050] = &_001293_hash, ++ [30827] = &_001294_hash, ++ [27146] = &_001295_hash, ++ [38786] = &_001296_hash, ++ [7155] = &_001297_hash, ++ [14096] = &_001298_hash, ++ [20206] = &_001299_hash, ++ [23207] = &_001300_hash, ++ [51546] = &_001301_hash, ++ [10121] = &_001302_hash, ++ [27727] = &_001303_hash, ++ [52668] = &_001304_hash, ++ [9890] = &_001305_hash, ++ [17684] = &_001306_hash, ++ [23548] = &_001307_hash, ++ [41742] = &_001308_hash, ++ [59852] = &_001309_hash, ++ [13724] = &_001310_hash, ++ [48440] = &_001311_hash, ++ [17144] = &_001312_hash, ++ [37770] = &_001314_hash, ++ [34762] = &_001315_hash, ++ [29460] = &_001316_hash, ++ [9870] = &_001317_hash, ++ [54888] = &_001318_hash, ++ [52678] = &_001319_hash, ++ [36258] = &_001320_hash, ++ [1014] = &_001321_hash, ++ [64907] = &_001322_hash, ++ [38078] = &_001323_hash, ++ [45486] = &_001324_hash, ++ [57969] = &_001325_hash, ++ [50198] = &_001326_hash, ++ [9474] = &_001327_hash, ++ [48553] = &_001328_hash, ++ [45747] = &_001329_hash, ++ [43151] = &_001330_hash, ++ [15626] = &_001331_hash, ++ [36278] = &_001332_hash, ++ [2803] = &_001333_hash, ++ [42715] = &_001334_hash, ++ [12552] = &_001335_hash, ++ [58158] = &_001336_hash, ++ [13099] = &_001337_hash, ++ 
[40973] = &_001338_hash,
++ [20988] = &_001339_hash,
++ [16939] = &_001340_hash,
++ [48587] = &_001341_hash,
++ [38776] = &_001342_hash,
++ [55644] = &_001343_hash,
++ [26322] = &_001344_hash,
++ [2885] = &_001345_hash,
++ [11588] = &_001346_hash,
++ [23791] = &_001347_hash,
++ [6078] = &_001348_hash,
++ [127] = &_001349_hash,
++ [58554] = &_001350_hash,
++ [27701] = &_001351_hash,
++ [14768] = &_001352_hash,
++ [57854] = &_001353_hash,
++ [44290] = &_001354_hash,
++ [17364] = &_001355_hash,
++ [15077] = &_001356_hash,
++ [31912] = &_001357_hash,
++ [46277] = &_001358_hash,
++ [64770] = &_001359_hash,
++ [13193] = &_001360_hash,
++ [52889] = &_001361_hash,
++ [4360] = &_001362_hash,
++ [20905] = &_001363_hash,
++ [39009] = &_001364_hash,
++ [30479] = &_001365_hash,
++ [40885] = &_001366_hash,
++ [24139] = &_001367_hash,
++ [64604] = &_001368_hash,
++ [57819] = &_001369_hash,
++ [46888] = &_001370_hash,
++ [58430] = &_001371_hash,
++};
+diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
+new file mode 100644
+index 0000000..b378515
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin.c
+@@ -0,0 +1,1185 @@
++/*
++ * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against INT_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
++ * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "toplev.h"
++#include "function.h"
++#include "tree-flow.h"
++#include "plugin.h"
++#include "gimple.h"
++#include "c-common.h"
++#include "diagnostic.h"
++#include "cfgloop.h"
++
++struct size_overflow_hash {
++ struct size_overflow_hash *next;
++ const char *name;
++ const char *file;
++ unsigned short param1:1;
++ unsigned short param2:1;
++ unsigned short param3:1;
++ unsigned short param4:1;
++ unsigned short param5:1;
++ unsigned short param6:1;
++ unsigned short param7:1;
++ unsigned short param8:1;
++ unsigned short param9:1;
++};
++
++#include "size_overflow_hash.h"
++
++#define __unused __attribute__((__unused__))
++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
++#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
++#define BEFORE_STMT true
++#define AFTER_STMT false
++#define CREATE_NEW_VAR NULL_TREE
++
++int plugin_is_GPL_compatible;
++void debug_gimple_stmt(gimple gs);
++
++static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
++static tree signed_size_overflow_type;
++static tree unsigned_size_overflow_type;
++static tree report_size_overflow_decl;
++static tree const_char_ptr_type_node;
++static unsigned int handle_function(void);
++static bool file_match = true;
++
++static struct plugin_info size_overflow_plugin_info = {
++ .version = "20120520beta",
++ .help = "no-size_overflow\tturn off size overflow checking\n",
++};
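For orientation: the attribute handler that follows validates uses of the size_overflow attribute, which the kernel side is expected to attach to prototypes whose integer size arguments should be recomputed and range-checked. A minimal sketch of that annotation style, with a hypothetical function name and wrapper macro (not part of this patch):

    /* parameter 1 carries a size: recompute it in double width and check it */
    #define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
    void *my_alloc(unsigned long size) __size_overflow(1);

Callees that lack the attribute are instead matched against the size_overflow_hash table above, keyed by function name and, unless the no-file-match plugin argument is given, by source file.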
++
++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
++{
++ unsigned int arg_count = type_num_arguments(*node);
++
++ for (; args; args = TREE_CHAIN(args)) {
++  tree position = TREE_VALUE(args);
++  if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
++   error("handle_size_overflow_attribute: overflow parameter outside range.");
++   *no_add_attrs = true;
++  }
++ }
++ return NULL_TREE;
++}
++
++static struct attribute_spec no_size_overflow_attr = {
++ .name = "size_overflow",
++ .min_length = 1,
++ .max_length = -1,
++ .decl_required = false,
++ .type_required = true,
++ .function_type_required = true,
++ .handler = handle_size_overflow_attribute
++};
++
++static void register_attributes(void __unused *event_data, void __unused *data)
++{
++ register_attribute(&no_size_overflow_attr);
++}
++
++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
++{
++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
++#define cwmixa( in ) { cwfold( in, m, k, h ); }
++#define cwmixb( in ) { cwfold( in, n, h, k ); }
++
++ const unsigned int m = 0x57559429;
++ const unsigned int n = 0x5052acdb;
++ const unsigned int *key4 = (const unsigned int *)key;
++ unsigned int h = len;
++ unsigned int k = len + seed + n;
++ unsigned long long p;
++
++ while (len >= 8) {
++  cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
++  len -= 8;
++ }
++ if (len >= 4) {
++  cwmixb(key4[0]) key4 += 1;
++  len -= 4;
++ }
++ if (len)
++  cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
++ cwmixb(h ^ (k + n));
++ return k ^ h;
++
++#undef cwfold
++#undef cwmixa
++#undef cwmixb
++}
++
++static inline unsigned int get_hash_num(const char *fndecl, const char *loc_file, unsigned int seed)
++{
++ unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
++ unsigned int file = CrapWow(loc_file, strlen(loc_file), seed) & 0xffff;
++
++ return fn ^ file;
++}
++
++static inline tree get_original_function_decl(tree fndecl)
++{
++ if (DECL_ABSTRACT_ORIGIN(fndecl))
++  return DECL_ABSTRACT_ORIGIN(fndecl);
++ return fndecl;
++}
++
++static inline gimple get_def_stmt(tree node)
++{
++ gcc_assert(TREE_CODE(node) == SSA_NAME);
++ return SSA_NAME_DEF_STMT(node);
++}
++
++static struct size_overflow_hash *get_function_hash(tree fndecl, const char *loc_file)
++{
++ unsigned int hash;
++ struct size_overflow_hash *entry;
++ const char *func_name = NAME(fndecl);
++
++ hash = get_hash_num(NAME(fndecl), loc_file, 0);
++
++ entry = size_overflow_hash[hash];
++ while (entry) {
++  if (!strcmp(entry->name, func_name) && (!file_match || !strcmp(entry->file, loc_file)))
++   return entry;
++  entry = entry->next;
++ }
++
++ return NULL;
++}
++
++static void check_arg_type(tree var)
++{
++ tree type = TREE_TYPE(var);
++ enum tree_code code = TREE_CODE(type);
++
++ gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
++  (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
++  (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
++}
++
++static int find_arg_number(tree arg, tree func)
++{
++ tree var;
++ bool match = false;
++ unsigned int argnum = 1;
++
++ if (TREE_CODE(arg) == SSA_NAME)
++  arg = SSA_NAME_VAR(arg);
++
++ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
++  if (strcmp(NAME(arg), NAME(var))) {
++   argnum++;
++   continue;
++ } ++ check_arg_type(var); ++ ++ match = true; ++ if (!TYPE_UNSIGNED(TREE_TYPE(var))) ++ return 0; ++ break; ++ } ++ if (!match) { ++ warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func)); ++ return 0; ++ } ++ return argnum; ++} ++ ++static void print_missing_msg(tree func, const char *filename, unsigned int argnum) ++{ ++ unsigned int new_hash; ++ location_t loc = DECL_SOURCE_LOCATION(func); ++ const char *curfunc = NAME(func); ++ ++ new_hash = get_hash_num(curfunc, filename, 0); ++ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s+", curfunc, curfunc, argnum, new_hash, filename); ++} ++ ++static void check_missing_attribute(tree arg) ++{ ++ tree type, func = get_original_function_decl(current_function_decl); ++ unsigned int argnum; ++ struct size_overflow_hash *hash; ++ const char *filename; ++ ++ gcc_assert(TREE_CODE(arg) != COMPONENT_REF); ++ ++ type = TREE_TYPE(arg); ++ // skip function pointers ++ if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE) ++ return; ++ ++ if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func)))) ++ return; ++ ++ argnum = find_arg_number(arg, func); ++ if (argnum == 0) ++ return; ++ ++ filename = DECL_SOURCE_FILE(func); ++ ++ hash = get_function_hash(func, filename); ++ if (!hash) { ++ print_missing_msg(func, filename, argnum); ++ return; ++ } ++ ++#define check_param(num) \ ++ if (num == argnum && hash->param##num) \ ++ return; ++ check_param(1); ++ check_param(2); ++ check_param(3); ++ check_param(4); ++ check_param(5); ++ check_param(6); ++ check_param(7); ++ check_param(8); ++ check_param(9); ++#undef check_param ++ ++ print_missing_msg(func, filename, argnum); ++} ++ ++static tree create_new_var(tree type) ++{ ++ tree new_var = create_tmp_var(type, "cicus"); ++ ++ add_referenced_var(new_var); ++ mark_sym_for_renaming(new_var); ++ return new_var; ++} ++ ++static bool is_bool(tree node) ++{ ++ tree type; ++ ++ if (node == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(node); ++ if (!INTEGRAL_TYPE_P(type)) ++ return false; ++ if (TREE_CODE(type) == BOOLEAN_TYPE) ++ return true; ++ if (TYPE_PRECISION(type) == 1) ++ return true; ++ return false; ++} ++ ++static tree cast_a_tree(tree type, tree var) ++{ ++ gcc_assert(fold_convertible_p(type, var)); ++ ++ return fold_convert(type, var); ++} ++ ++static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc) ++{ ++ gimple assign; ++ ++ if (new_var == CREATE_NEW_VAR) ++ new_var = create_new_var(type); ++ ++ assign = gimple_build_assign(new_var, cast_a_tree(type, var)); ++ gimple_set_location(assign, loc); ++ gimple_set_lhs(assign, make_ssa_name(new_var, assign)); ++ ++ return assign; ++} ++ ++static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before) ++{ ++ tree oldstmt_rhs1; ++ enum tree_code code; ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ ++ if (!*potentionally_overflowed) ++ return NULL_TREE; ++ ++ if (rhs1 == NULL_TREE) { ++ debug_gimple_stmt(oldstmt); ++ error("create_assign: rhs1 is NULL_TREE"); ++ gcc_unreachable(); ++ } ++ ++ oldstmt_rhs1 = gimple_assign_rhs1(oldstmt); ++ code = TREE_CODE(oldstmt_rhs1); ++ if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP)) ++ check_missing_attribute(oldstmt_rhs1); ++ ++ stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt)); ++ gsi = gsi_for_stmt(oldstmt); ++ if 
(lookup_stmt_eh_lp(oldstmt) != 0) { ++ basic_block next_bb, cur_bb; ++ edge e; ++ ++ gcc_assert(before == false); ++ gcc_assert(stmt_can_throw_internal(oldstmt)); ++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ cur_bb = gimple_bb(oldstmt); ++ next_bb = cur_bb->next_bb; ++ e = find_edge(cur_bb, next_bb); ++ gcc_assert(e != NULL); ++ gcc_assert(e->flags & EDGE_FALLTHRU); ++ ++ gsi = gsi_after_labels(next_bb); ++ gcc_assert(!gsi_end_p(gsi)); ++ before = true; ++ } ++ if (before) ++ gsi_insert_before(&gsi, stmt, GSI_NEW_STMT); ++ else ++ gsi_insert_after(&gsi, stmt, GSI_NEW_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited, oldstmt); ++ return gimple_get_lhs(stmt); ++} ++ ++static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3) ++{ ++ tree new_var, lhs = gimple_get_lhs(oldstmt); ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ ++ if (!*potentionally_overflowed) ++ return NULL_TREE; ++ ++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) { ++ rhs1 = gimple_assign_rhs1(oldstmt); ++ rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT); ++ } ++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) { ++ rhs2 = gimple_assign_rhs2(oldstmt); ++ rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT); ++ } ++ ++ stmt = gimple_copy(oldstmt); ++ gimple_set_location(stmt, gimple_location(oldstmt)); ++ ++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR) ++ gimple_assign_set_rhs_code(stmt, MULT_EXPR); ++ ++ if (is_bool(lhs)) ++ new_var = SSA_NAME_VAR(lhs); ++ else ++ new_var = create_new_var(signed_size_overflow_type); ++ new_var = make_ssa_name(new_var, stmt); ++ gimple_set_lhs(stmt, new_var); ++ ++ if (rhs1 != NULL_TREE) { ++ if (!gimple_assign_cast_p(oldstmt)) ++ rhs1 = cast_a_tree(signed_size_overflow_type, rhs1); ++ gimple_assign_set_rhs1(stmt, rhs1); ++ } ++ ++ if (rhs2 != NULL_TREE) ++ gimple_assign_set_rhs2(stmt, rhs2); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (rhs3 != NULL_TREE) ++ gimple_assign_set_rhs3(stmt, rhs3); ++#endif ++ gimple_set_vuse(stmt, gimple_vuse(oldstmt)); ++ gimple_set_vdef(stmt, gimple_vdef(oldstmt)); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited, oldstmt); ++ return gimple_get_lhs(stmt); ++} ++ ++static gimple overflow_create_phi_node(gimple oldstmt, tree var) ++{ ++ basic_block bb; ++ gimple phi; ++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt); ++ ++ bb = gsi_bb(gsi); ++ ++ phi = create_phi_node(var, bb); ++ gsi = gsi_last(phi_nodes(bb)); ++ gsi_remove(&gsi, false); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT); ++ gimple_set_bb(phi, bb); ++ return phi; ++} ++ ++static tree signed_cast_constant(tree node) ++{ ++ gcc_assert(is_gimple_constant(node)); ++ ++ return cast_a_tree(signed_size_overflow_type, node); ++} ++ ++static basic_block create_a_first_bb(void) ++{ ++ basic_block first_bb; ++ ++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR); ++ return first_bb; ++} ++ ++static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i) ++{ ++ basic_block bb; ++ gimple newstmt, def_stmt; ++ gimple_stmt_iterator gsi; ++ ++ newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, 
gimple_location(oldstmt)); ++ if (TREE_CODE(arg) == SSA_NAME) { ++ def_stmt = get_def_stmt(arg); ++ if (gimple_code(def_stmt) != GIMPLE_NOP) { ++ gsi = gsi_for_stmt(def_stmt); ++ gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT); ++ return newstmt; ++ } ++ } ++ ++ bb = gimple_phi_arg_edge(oldstmt, i)->src; ++ if (bb->index == 0) ++ bb = create_a_first_bb(); ++ gsi = gsi_after_labels(bb); ++ gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT); ++ return newstmt; ++} ++ ++static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs) ++{ ++ gimple newstmt; ++ gimple_stmt_iterator gsi; ++ void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update); ++ gimple def_newstmt = get_def_stmt(new_rhs); ++ ++ gsi_insert = gsi_insert_after; ++ gsi = gsi_for_stmt(def_newstmt); ++ ++ switch (gimple_code(get_def_stmt(arg))) { ++ case GIMPLE_PHI: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ gsi = gsi_after_labels(gimple_bb(def_newstmt)); ++ gsi_insert = gsi_insert_before; ++ break; ++ case GIMPLE_ASM: ++ case GIMPLE_CALL: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ break; ++ case GIMPLE_ASSIGN: ++ newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt)); ++ break; ++ default: ++ /* unknown gimple_code (handle_build_new_phi_arg) */ ++ gcc_unreachable(); ++ } ++ ++ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt)); ++ gsi_insert(&gsi, newstmt, GSI_NEW_STMT); ++ update_stmt(newstmt); ++ return newstmt; ++} ++ ++static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var) ++{ ++ gimple newstmt; ++ tree new_rhs; ++ ++ new_rhs = expand(visited, potentionally_overflowed, arg); ++ ++ if (new_rhs == NULL_TREE) ++ return NULL_TREE; ++ ++ newstmt = handle_new_phi_arg(arg, new_var, new_rhs); ++ return gimple_get_lhs(newstmt); ++} ++ ++static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt) ++{ ++ gimple phi; ++ tree new_var = create_new_var(signed_size_overflow_type); ++ unsigned int i, n = gimple_phi_num_args(oldstmt); ++ ++ pointer_set_insert(visited, oldstmt); ++ phi = overflow_create_phi_node(oldstmt, new_var); ++ for (i = 0; i < n; i++) { ++ tree arg, lhs; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ if (is_gimple_constant(arg)) ++ arg = signed_cast_constant(arg); ++ lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var); ++ if (lhs == NULL_TREE) ++ lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i)); ++ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt)); ++ } ++ ++ update_stmt(phi); ++ return gimple_phi_result(phi); ++} ++ ++static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var) ++{ ++ gimple def_stmt = get_def_stmt(var); ++ tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt); ++ ++ *potentionally_overflowed = true; ++ new_rhs1 = expand(visited, potentionally_overflowed, rhs1); ++ if (new_rhs1 == NULL_TREE) { ++ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE) ++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT); ++ else ++ return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT); ++ } ++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE); ++} ++ ++static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var) ++{ ++ gimple def_stmt = get_def_stmt(var); ++ tree rhs1 = gimple_assign_rhs1(def_stmt); ++ ++ if 
(is_gimple_constant(rhs1)) ++ return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE); ++ ++ gcc_assert(TREE_CODE(rhs1) != COND_EXPR); ++ switch (TREE_CODE(rhs1)) { ++ case SSA_NAME: ++ return handle_unary_rhs(visited, potentionally_overflowed, var); ++ ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case ADDR_EXPR: ++ case COMPONENT_REF: ++ case INDIRECT_REF: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case PARM_DECL: ++ case TARGET_MEM_REF: ++ case VAR_DECL: ++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT); ++ ++ default: ++ debug_gimple_stmt(def_stmt); ++ debug_tree(rhs1); ++ gcc_unreachable(); ++ } ++} ++ ++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value) ++{ ++ gimple cond_stmt; ++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb); ++ ++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE); ++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(cond_stmt); ++} ++ ++static tree create_string_param(tree string) ++{ ++ tree i_type, a_type; ++ int length = TREE_STRING_LENGTH(string); ++ ++ gcc_assert(length > 0); ++ ++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1)); ++ a_type = build_array_type(char_type_node, i_type); ++ ++ TREE_TYPE(string) = a_type; ++ TREE_CONSTANT(string) = 1; ++ TREE_READONLY(string) = 1; ++ ++ return build1(ADDR_EXPR, ptr_type_node, string); ++} ++ ++static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg) ++{ ++ gimple func_stmt, def_stmt; ++ tree current_func, loc_file, loc_line; ++ expanded_location xloc; ++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true); ++ ++ def_stmt = get_def_stmt(arg); ++ xloc = expand_location(gimple_location(def_stmt)); ++ ++ if (!gimple_has_location(def_stmt)) { ++ xloc = expand_location(gimple_location(stmt)); ++ if (!gimple_has_location(stmt)) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ } ++ ++ loc_line = build_int_cstu(unsigned_type_node, xloc.line); ++ ++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file); ++ loc_file = create_string_param(loc_file); ++ ++ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl)); ++ current_func = create_string_param(current_func); ++ ++ // void report_size_overflow(const char *file, unsigned int line, const char *func) ++ func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func); ++ ++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING); ++} ++ ++static void __unused print_the_code_insertions(gimple stmt) ++{ ++ location_t loc = gimple_location(stmt); ++ ++ inform(loc, "Integer size_overflow check applied here."); ++} ++ ++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value) ++{ ++ basic_block cond_bb, join_bb, bb_true; ++ edge e; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ cond_bb = gimple_bb(stmt); ++ gsi_prev(&gsi); ++ if (gsi_end_p(gsi)) ++ e = split_block_after_labels(cond_bb); ++ else ++ e = split_block(cond_bb, gsi_stmt(gsi)); ++ cond_bb = e->src; ++ join_bb = e->dest; ++ e->flags = EDGE_FALSE_VALUE; ++ e->probability = REG_BR_PROB_BASE; ++ ++ bb_true = create_empty_bb(cond_bb); ++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE); ++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE); ++ make_edge(bb_true, join_bb, EDGE_FALLTHRU); ++ ++ if (dom_info_available_p(CDI_DOMINATORS)) { ++ 
set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb); ++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb); ++ } ++ ++ if (current_loops != NULL) { ++ gcc_assert(cond_bb->loop_father == join_bb->loop_father); ++ add_bb_to_loop(bb_true, cond_bb->loop_father); ++ } ++ ++ insert_cond(cond_bb, arg, cond_code, type_value); ++ insert_cond_result(bb_true, stmt, arg); ++ ++// print_the_code_insertions(stmt); ++} ++ ++static tree get_type_for_check(tree rhs) ++{ ++ tree def_rhs; ++ gimple def_stmt = get_def_stmt(rhs); ++ ++ if (!gimple_assign_cast_p(def_stmt)) ++ return TREE_TYPE(rhs); ++ def_rhs = gimple_assign_rhs1(def_stmt); ++ if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE) ++ return TREE_TYPE(def_rhs); ++ return TREE_TYPE(rhs); ++} ++ ++static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs) ++{ ++ gimple ucast_stmt; ++ gimple_stmt_iterator gsi; ++ location_t loc = gimple_location(stmt); ++ ++ ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc); ++ gsi = gsi_for_stmt(stmt); ++ gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT); ++ return ucast_stmt; ++} ++ ++static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed) ++{ ++ tree type_max, type_min, rhs_type; ++ gimple ucast_stmt; ++ ++ if (!*potentionally_overflowed) ++ return; ++ ++ rhs_type = get_type_for_check(rhs); ++ ++ if (TYPE_UNSIGNED(rhs_type)) { ++ ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs); ++ type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type)); ++ insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max); ++ } else { ++ type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type)); ++ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max); ++ ++ type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type)); ++ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min); ++ } ++} ++ ++static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree new_rhs, origtype = TREE_TYPE(orig_rhs); ++ ++ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN); ++ ++ new_rhs = expand(visited, potentionally_overflowed, orig_rhs); ++ if (new_rhs == NULL_TREE) ++ return NULL_TREE; ++ ++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt)); ++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT); ++ update_stmt(assign); ++ return gimple_get_lhs(assign); ++} ++ ++static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree)) ++{ ++ tree new_rhs, cast_rhs; ++ ++ if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR) ++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE); ++ ++ new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs); ++ if (new_rhs != NULL_TREE) { ++ gimple_assign_set_rhs(def_stmt, new_rhs); ++ update_stmt(def_stmt); ++ ++ cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs)); ++ ++ check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed); ++ } ++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT); ++} ++ ++static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var) ++{ ++ tree 
rhs1, rhs2; ++ gimple def_stmt = get_def_stmt(var); ++ tree new_rhs1 = NULL_TREE; ++ tree new_rhs2 = NULL_TREE; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ /* no DImode/TImode division in the 32/64 bit kernel */ ++ switch (gimple_assign_rhs_code(def_stmt)) { ++ case RDIV_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ case EXACT_DIV_EXPR: ++ case POINTER_PLUS_EXPR: ++ case BIT_AND_EXPR: ++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT); ++ default: ++ break; ++ } ++ ++ *potentionally_overflowed = true; ++ ++ if (TREE_CODE(rhs1) == SSA_NAME) ++ new_rhs1 = expand(visited, potentionally_overflowed, rhs1); ++ if (TREE_CODE(rhs2) == SSA_NAME) ++ new_rhs2 = expand(visited, potentionally_overflowed, rhs2); ++ ++ if (is_gimple_constant(rhs2)) ++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1); ++ ++ if (is_gimple_constant(rhs1)) ++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2); ++ ++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++#if BUILDING_GCC_VERSION >= 4007 ++static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs) ++{ ++ if (is_gimple_constant(rhs)) ++ return signed_cast_constant(rhs); ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return NULL_TREE; ++ return expand(visited, potentionally_overflowed, rhs); ++} ++ ++static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var) ++{ ++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3; ++ gimple def_stmt = get_def_stmt(var); ++ ++ *potentionally_overflowed = true; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs3 = gimple_assign_rhs3(def_stmt); ++ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1); ++ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2); ++ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3); ++ ++ if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE) ++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3); ++ error("handle_ternary_ops: unknown rhs"); ++ gcc_unreachable(); ++} ++#endif ++ ++static void set_size_overflow_type(tree node) ++{ ++ switch (TYPE_MODE(TREE_TYPE(node))) { ++ case SImode: ++ signed_size_overflow_type = intDI_type_node; ++ unsigned_size_overflow_type = unsigned_intDI_type_node; ++ break; ++ case DImode: ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) { ++ signed_size_overflow_type = intDI_type_node; ++ unsigned_size_overflow_type = unsigned_intDI_type_node; ++ } else { ++ signed_size_overflow_type = intTI_type_node; ++ unsigned_size_overflow_type = unsigned_intTI_type_node; ++ } ++ break; ++ default: ++ error("set_size_overflow_type: unsupported gcc configuration."); ++ gcc_unreachable(); ++ } ++} ++ ++static tree expand_visited(gimple def_stmt) ++{ ++ gimple tmp; ++ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt); ++ ++ gsi_next(&gsi); ++ tmp = gsi_stmt(gsi); ++ switch (gimple_code(tmp)) { ++ case GIMPLE_ASSIGN: ++ return gimple_get_lhs(tmp); ++ case GIMPLE_PHI: ++ return gimple_phi_result(tmp); ++ case 
GIMPLE_CALL: ++ return gimple_call_lhs(tmp); ++ default: ++ return NULL_TREE; ++ } ++} ++ ++static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var) ++{ ++ gimple def_stmt; ++ enum tree_code code = TREE_CODE(TREE_TYPE(var)); ++ ++ if (is_gimple_constant(var)) ++ return NULL_TREE; ++ ++ if (TREE_CODE(var) == ADDR_EXPR) ++ return NULL_TREE; ++ ++ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE); ++ if (code != INTEGER_TYPE) ++ return NULL_TREE; ++ ++ if (SSA_NAME_IS_DEFAULT_DEF(var)) { ++ check_missing_attribute(var); ++ return NULL_TREE; ++ } ++ ++ def_stmt = get_def_stmt(var); ++ ++ if (!def_stmt) ++ return NULL_TREE; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return expand_visited(def_stmt); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ check_missing_attribute(var); ++ return NULL_TREE; ++ case GIMPLE_PHI: ++ return build_new_phi(visited, potentionally_overflowed, def_stmt); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return handle_unary_ops(visited, potentionally_overflowed, var); ++ case 3: ++ return handle_binary_ops(visited, potentionally_overflowed, var); ++#if BUILDING_GCC_VERSION >= 4007 ++ case 4: ++ return handle_ternary_ops(visited, potentionally_overflowed, var); ++#endif ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ error("expand: unknown gimple code"); ++ gcc_unreachable(); ++ } ++} ++ ++static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree origtype = TREE_TYPE(origarg); ++ ++ gcc_assert(gimple_code(stmt) == GIMPLE_CALL); ++ ++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt)); ++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT); ++ update_stmt(assign); ++ ++ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign)); ++ update_stmt(stmt); ++} ++ ++static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl) ++{ ++ const char *origid; ++ tree arg, origarg; ++ ++ if (!DECL_ABSTRACT_ORIGIN(fndecl)) { ++ gcc_assert(gimple_call_num_args(stmt) > argnum); ++ return gimple_call_arg(stmt, argnum); ++ } ++ ++ origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl)); ++ while (origarg && argnum) { ++ argnum--; ++ origarg = TREE_CHAIN(origarg); ++ } ++ ++ gcc_assert(argnum == 0); ++ ++ gcc_assert(origarg != NULL_TREE); ++ origid = NAME(origarg); ++ for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) { ++ if (!strcmp(origid, NAME(arg))) ++ return arg; ++ } ++ return NULL_TREE; ++} ++ ++static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum) ++{ ++ struct pointer_set_t *visited; ++ tree arg, newarg, type_max; ++ gimple ucast_stmt; ++ bool potentionally_overflowed; ++ ++ arg = get_function_arg(argnum, stmt, fndecl); ++ if (arg == NULL_TREE) ++ return; ++ ++ if (is_gimple_constant(arg)) ++ return; ++ if (TREE_CODE(arg) != SSA_NAME) ++ return; ++ ++ check_arg_type(arg); ++ ++ set_size_overflow_type(arg); ++ ++ visited = pointer_set_create(); ++ potentionally_overflowed = false; ++ newarg = expand(visited, &potentionally_overflowed, arg); ++ pointer_set_destroy(visited); ++ ++ if (newarg == NULL_TREE || !potentionally_overflowed) ++ return; ++ ++ change_function_arg(stmt, arg, argnum, newarg); ++ ++ ucast_stmt = 
cast_to_unsigned_size_overflow_type(stmt, newarg); ++ ++ type_max = build_int_cstu(unsigned_size_overflow_type, 0x7fffffff); ++ insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max); ++} ++ ++static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl) ++{ ++ tree p = TREE_VALUE(attr); ++ do { ++ handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1); ++ p = TREE_CHAIN(p); ++ } while (p); ++} ++ ++static void handle_function_by_hash(gimple stmt, tree fndecl) ++{ ++ tree orig_fndecl; ++ struct size_overflow_hash *hash; ++ const char *filename = DECL_SOURCE_FILE(fndecl); ++ ++ orig_fndecl = get_original_function_decl(fndecl); ++ hash = get_function_hash(orig_fndecl, filename); ++ if (!hash) ++ return; ++ ++#define search_param(argnum) \ ++ if (hash->param##argnum) \ ++ handle_function_arg(stmt, fndecl, argnum - 1); ++ ++ search_param(1); ++ search_param(2); ++ search_param(3); ++ search_param(4); ++ search_param(5); ++ search_param(6); ++ search_param(7); ++ search_param(8); ++ search_param(9); ++#undef search_param ++} ++ ++static unsigned int handle_function(void) ++{ ++ basic_block bb = ENTRY_BLOCK_PTR->next_bb; ++ int saved_last_basic_block = last_basic_block; ++ ++ do { ++ gimple_stmt_iterator gsi; ++ basic_block next = bb->next_bb; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ tree fndecl, attr; ++ gimple stmt = gsi_stmt(gsi); ++ ++ if (!(is_gimple_call(stmt))) ++ continue; ++ fndecl = gimple_call_fndecl(stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (gimple_call_num_args(stmt) == 0) ++ continue; ++ attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl))); ++ if (!attr || !TREE_VALUE(attr)) ++ handle_function_by_hash(stmt, fndecl); ++ else ++ handle_function_by_attribute(stmt, attr, fndecl); ++ gsi = gsi_for_stmt(stmt); ++ } ++ bb = next; ++ } while (bb && bb->index <= saved_last_basic_block); ++ return 0; ++} ++ ++static struct gimple_opt_pass size_overflow_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "size_overflow", ++ .gate = NULL, ++ .execute = handle_function, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_cfg | PROP_referenced_vars, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow ++ } ++}; ++ ++static void start_unit_callback(void __unused *gcc_data, void __unused *user_data) ++{ ++ tree fntype; ++ ++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0)); ++ ++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func) ++ fntype = build_function_type_list(void_type_node, ++ const_char_ptr_type_node, ++ unsigned_type_node, ++ const_char_ptr_type_node, ++ NULL_TREE); ++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype); ++ ++ DECL_ASSEMBLER_NAME(report_size_overflow_decl); ++ TREE_PUBLIC(report_size_overflow_decl) = 1; ++ DECL_EXTERNAL(report_size_overflow_decl) = 1; ++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1; ++} ++ ++extern struct gimple_opt_pass pass_dce; ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ int i; ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const 
+diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
+new file mode 100644
+index 0000000..b87ec9d
+--- /dev/null
++++ b/tools/gcc/stackleak_plugin.c
+@@ -0,0 +1,313 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help implement various PaX features
++ *
++ * - track lowest stack pointer
++ *
++ * TODO:
++ * - initialize all local variables
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static int track_frame_size = -1; ++static const char track_function[] = "pax_track_stack"; ++static const char check_function[] = "pax_check_alloca"; ++static bool init_locals; ++ ++static struct plugin_info stackleak_plugin_info = { ++ .version = "201203140940", ++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n" ++// "initialize-locals\t\tforcibly initialize all stack frames\n" ++}; ++ ++static bool gate_stackleak_track_stack(void); ++static unsigned int execute_stackleak_tree_instrument(void); ++static unsigned int execute_stackleak_final(void); ++ ++static struct gimple_opt_pass stackleak_tree_instrument_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "stackleak_tree_instrument", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_tree_instrument, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa ++ } ++}; ++ ++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "stackleak_final", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_final, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func ++ } ++}; ++ ++static bool gate_stackleak_track_stack(void) ++{ ++ return track_frame_size >= 0; ++} ++ ++static void stackleak_check_alloca(gimple_stmt_iterator *gsi) ++{ ++ gimple check_alloca; ++ tree fntype, fndecl, alloca_size; ++ ++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE); ++ fndecl = build_fn_decl(check_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_check_alloca(unsigned long size) ++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0); ++ check_alloca = gimple_build_call(fndecl, 1, alloca_size); ++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT); ++} ++ ++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi) ++{ ++ gimple track_stack; ++ tree fntype, fndecl; ++ ++ fntype = build_function_type_list(void_type_node, NULL_TREE); ++ fndecl = build_fn_decl(track_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_track_stack(void) ++ track_stack = gimple_build_call(fndecl, 0); ++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING); ++} ++ ++#if BUILDING_GCC_VERSION == 4005 ++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code) ++{ ++ tree fndecl; ++ ++ if (!is_gimple_call(stmt)) ++ return false; ++ fndecl = gimple_call_fndecl(stmt); ++ if (!fndecl) ++ return false; ++ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL) ++ return false; ++// print_node(stderr, "pax", fndecl, 4); ++ return DECL_FUNCTION_CODE(fndecl) == code; ++} ++#endif ++ ++static bool is_alloca(gimple stmt) ++{ ++ if 
++	if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
++		return true;
++
++#if BUILDING_GCC_VERSION >= 4007
++	if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
++		return true;
++#endif
++
++	return false;
++}
++
++static unsigned int execute_stackleak_tree_instrument(void)
++{
++	basic_block bb, entry_bb;
++	bool prologue_instrumented = false, is_leaf = true;
++
++	entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
++
++	// 1. loop through BBs and GIMPLE statements
++	FOR_EACH_BB(bb) {
++		gimple_stmt_iterator gsi;
++
++		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++			gimple stmt;
++
++			stmt = gsi_stmt(gsi);
++
++			if (is_gimple_call(stmt))
++				is_leaf = false;
++
++			// gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
++			if (!is_alloca(stmt))
++				continue;
++
++			// 2. insert stack overflow check before each __builtin_alloca call
++			stackleak_check_alloca(&gsi);
++
++			// 3. insert track call after each __builtin_alloca call
++			stackleak_add_instrumentation(&gsi);
++			if (bb == entry_bb)
++				prologue_instrumented = true;
++		}
++	}
++
++	// special cases for some bad linux code: taking the address of static inline functions will materialize them
++	// but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
++	// will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
++	// case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
++	if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
++		return 0;
++	if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
++		return 0;
++
++	// 4. insert track call at the beginning
++	if (!prologue_instrumented) {
++		gimple_stmt_iterator gsi;
++
++		bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++		if (dom_info_available_p(CDI_DOMINATORS))
++			set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++		gsi = gsi_start_bb(bb);
++		stackleak_add_instrumentation(&gsi);
++	}
++
++	return 0;
++}
++
++static unsigned int execute_stackleak_final(void)
++{
++	rtx insn;
++
++	if (cfun->calls_alloca)
++		return 0;
++
++	// keep calls only if function frame is big enough
++	if (get_frame_size() >= track_frame_size)
++		return 0;
++
++	// 1. find pax_track_stack calls
++	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++		// rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
++		rtx body;
++
++		if (!CALL_P(insn))
++			continue;
++		body = PATTERN(insn);
++		if (GET_CODE(body) != CALL)
++			continue;
++		body = XEXP(body, 0);
++		if (GET_CODE(body) != MEM)
++			continue;
++		body = XEXP(body, 0);
++		if (GET_CODE(body) != SYMBOL_REF)
++			continue;
++		if (strcmp(XSTR(body, 0), track_function))
++			continue;
++//		warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++		// 2. delete call
++		insn = delete_insn_and_edges(insn);
++#if BUILDING_GCC_VERSION >= 4007
++		if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
++			insn = delete_insn_and_edges(insn);
++#endif
++	}
++
++//	print_simple_rtl(stderr, get_insns());
++//	print_rtl(stderr, get_insns());
++//	warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++
++	return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++	const char * const plugin_name = plugin_info->base_name;
++	const int argc = plugin_info->argc;
++	const struct plugin_argument * const argv = plugin_info->argv;
++	int i;
++	struct register_pass_info stackleak_tree_instrument_pass_info = {
++		.pass = &stackleak_tree_instrument_pass.pass,
++//		.reference_pass_name = "tree_profile",
++		.reference_pass_name = "optimized",
++		.ref_pass_instance_number = 0,
++		.pos_op = PASS_POS_INSERT_BEFORE
++	};
++	struct register_pass_info stackleak_final_pass_info = {
++		.pass = &stackleak_final_rtl_opt_pass.pass,
++		.reference_pass_name = "final",
++		.ref_pass_instance_number = 0,
++		.pos_op = PASS_POS_INSERT_BEFORE
++	};
++
++	if (!plugin_default_version_check(version, &gcc_version)) {
++		error(G_("incompatible gcc/plugin versions"));
++		return 1;
++	}
++
++	register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
++
++	for (i = 0; i < argc; ++i) {
++		if (!strcmp(argv[i].key, "track-lowest-sp")) {
++			if (!argv[i].value) {
++				error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++				continue;
++			}
++			track_frame_size = atoi(argv[i].value);
++			if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
++				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++			continue;
++		}
++		if (!strcmp(argv[i].key, "initialize-locals")) {
++			if (argv[i].value) {
++				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++				continue;
++			}
++			init_locals = true;
++			continue;
++		}
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++	}
++
++	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
++	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
++
++	return 0;
++}
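The stackleak pass pair above instruments code against two runtime hooks, pax_track_stack() and pax_check_alloca(), and is driven entirely by the track-lowest-sp argument parsed in plugin_init(). A usage sketch, assuming the plugin was built as stackleak_plugin.so and picking 512 bytes as an arbitrary frame-size threshold (the kernel build wires this up through its own Makefile):

	# compile one file with stackleak instrumentation (threshold is illustrative)
	gcc -fplugin=./stackleak_plugin.so \
	    -fplugin-arg-stackleak_plugin-track-lowest-sp=512 \
	    -c -o foo.o foo.c

The instrumented objects then expect the kernel to provide the hooks with the signatures given in the comments above:

	void pax_track_stack(void);                 /* record the lowest stack pointer seen */
	void pax_check_alloca(unsigned long size);  /* reject oversized __builtin_alloca */
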
+diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
+index 83b3dde..835bee7 100644
+--- a/usr/gen_init_cpio.c
++++ b/usr/gen_init_cpio.c
+@@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
+ 	int retval;
+ 	int rc = -1;
+ 	int namesize;
+-	int i;
++	unsigned int i;
+ 
+ 	mode |= S_IFREG;
+ 
+@@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
+ 		*env_var = *expanded = '\0';
+ 		strncat(env_var, start + 2, end - start - 2);
+ 		strncat(expanded, new_location, start - new_location);
+-		strncat(expanded, getenv(env_var), PATH_MAX);
+-		strncat(expanded, end + 1, PATH_MAX);
++		strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
++		strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
+ 		strncpy(new_location, expanded, PATH_MAX);
++		new_location[PATH_MAX] = 0;
+ 	} else
+ 		break;
+ 	}
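The gen_init_cpio.c hunk above fixes a classic strncat() misuse: the size argument bounds how many bytes are appended, not the destination's total capacity, so passing PATH_MAX on every call can overflow a partially filled buffer. The corrected idiom, sketched with illustrative buffer names:

	/* each append is bounded by the space actually left in the buffer */
	char expanded[PATH_MAX + 1] = "";
	strncat(expanded, first_part, PATH_MAX - strlen(expanded));
	strncat(expanded, second_part, PATH_MAX - strlen(expanded));
	/* strncat always NUL-terminates; strncpy does not, which is why
	 * the patch also adds the explicit new_location[PATH_MAX] = 0 */
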
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4f3434f..fc63040 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -43,6 +43,8 @@
+ #include <linux/swap.h>
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
++#include <linux/namei.h>
++#include <linux/fs.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/io.h>
+@@ -575,12 +577,73 @@ out:
+ 	return r;
+ }
+ 
++/*
++ * We want to test whether the caller has been granted permissions to
++ * use this device. To be able to configure and control the device,
++ * the user needs access to PCI configuration space and BAR resources.
++ * These are accessed through PCI sysfs. PCI config space is often
++ * passed to the process calling this ioctl via file descriptor, so we
++ * can't rely on access to that file. We can check for permissions
++ * on each of the BAR resource files, which is a pretty clear
++ * indicator that the user has been granted access to the device.
++ */
++static int probe_sysfs_permissions(struct pci_dev *dev)
++{
++#ifdef CONFIG_SYSFS
++	int i;
++	bool bar_found = false;
++
++	for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
++		char *kpath, *syspath;
++		struct path path;
++		struct inode *inode;
++		int r;
++
++		if (!pci_resource_len(dev, i))
++			continue;
++
++		kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
++		if (!kpath)
++			return -ENOMEM;
++
++		/* Per sysfs-rules, sysfs is always at /sys */
++		syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
++		kfree(kpath);
++		if (!syspath)
++			return -ENOMEM;
++
++		r = kern_path(syspath, LOOKUP_FOLLOW, &path);
++		kfree(syspath);
++		if (r)
++			return r;
++
++		inode = path.dentry->d_inode;
++
++		r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
++		path_put(&path);
++		if (r)
++			return r;
++
++		bar_found = true;
++	}
++
++	/* If no resources, probably something special */
++	if (!bar_found)
++		return -EPERM;
++
++	return 0;
++#else
++	return -EINVAL; /* No way to control the device without sysfs */
++#endif
++}
++
+ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+ 				      struct kvm_assigned_pci_dev *assigned_dev)
+ {
+ 	int r = 0;
+ 	struct kvm_assigned_dev_kernel *match;
+ 	struct pci_dev *dev;
++	u8 header_type;
+ 
+ 	down_read(&kvm->slots_lock);
+ 	mutex_lock(&kvm->lock);
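probe_sysfs_permissions() above approves a device for assignment only if the caller has read and write permission on every populated BAR resource file under /sys. The same condition can be probed from userspace before attempting assignment; a sketch, with an illustrative PCI address (note access() checks real rather than effective IDs, so this is an approximation of the kernel-side inode_permission() check):

	/* Sketch: the userspace-visible side of the kernel check.
	 * The device address 0000:01:00.0 is an illustrative example. */
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *bar = "/sys/bus/pci/devices/0000:01:00.0/resource0";

		if (access(bar, R_OK | W_OK) == 0)
			printf("BAR accessible; device assignment should be permitted\n");
		else
			perror(bar);
		return 0;
	}
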
+@@ -607,6 +670,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+ 		r = -EINVAL;
+ 		goto out_free;
+ 	}
++
++	/* Don't allow bridges to be assigned */
++	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
++	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
++		r = -EPERM;
++		goto out_put;
++	}
++
++	r = probe_sysfs_permissions(dev);
++	if (r)
++		goto out_put;
++
+ 	if (pci_enable_device(dev)) {
+ 		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
+ 		r = -EBUSY;
+@@ -2494,7 +2569,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
+ 	if (kvm_rebooting)
+ 		/* spin while reset goes on */
+ 		while (true)
+-			;
++			cpu_relax();
+ 	/* Fault while not rebooting. We want the trace. */
+ 	BUG();
+ }
+@@ -2714,7 +2789,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+ 	kvm_arch_vcpu_put(vcpu);
+ }
+ 
+-int kvm_init(void *opaque, unsigned int vcpu_size,
++int kvm_init(const void *opaque, unsigned int vcpu_size,
+ 	     struct module *module)
+ {
+ 	int r;
+@@ -2767,15 +2842,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
+ 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
+ 	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
+ 					   __alignof__(struct kvm_vcpu),
+-					   0, NULL);
++					   SLAB_USERCOPY, NULL);
+ 	if (!kvm_vcpu_cache) {
+ 		r = -ENOMEM;
+ 		goto out_free_5;
+ 	}
+ 
+-	kvm_chardev_ops.owner = module;
+-	kvm_vm_fops.owner = module;
+-	kvm_vcpu_fops.owner = module;
++	pax_open_kernel();
++	*(void **)&kvm_chardev_ops.owner = module;
++	*(void **)&kvm_vm_fops.owner = module;
++	*(void **)&kvm_vcpu_fops.owner = module;
++	pax_close_kernel();
+ 
+ 	r = misc_register(&kvm_dev);
+ 	if (r) {